[llvm] [AMDGPU] Remove blocks that only branch to other blocks (PR #184908)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Mar 5 17:52:11 PST 2026
https://github.com/LU-JOHN updated https://github.com/llvm/llvm-project/pull/184908
From 9969f8b42e9212042bd451be4f62b26af5418cd4 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Wed, 4 Mar 2026 13:28:56 -0600
Subject: [PATCH] Remove blocks that only branch to other blocks
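
Blocks whose only non-meta instruction is an unconditional branch can be
removed by re-directing each predecessor straight to the branch target.
As a hand-written sketch (illustrative block numbers, not taken from the
new redundant-block.mir test), the transformation turns

  bb.0:
    S_CBRANCH_SCC1 %bb.2, implicit $scc
    S_BRANCH %bb.1

  bb.1:
    S_BRANCH %bb.3

into

  bb.0:
    S_CBRANCH_SCC1 %bb.2, implicit $scc
    S_BRANCH %bb.3

with bb.1 erased once it has no remaining predecessors.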
---
llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp | 74 +
.../test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll | 126 +-
llvm/test/CodeGen/AMDGPU/add.ll | 30 +-
.../CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll | 4973 ++---------------
.../CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll | 1783 ++----
.../CodeGen/AMDGPU/amdgcn.bitcast.160bit.ll | 293 +-
.../CodeGen/AMDGPU/amdgcn.bitcast.16bit.ll | 137 +-
.../CodeGen/AMDGPU/amdgcn.bitcast.192bit.ll | 702 +--
.../CodeGen/AMDGPU/amdgcn.bitcast.224bit.ll | 303 +-
.../CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll | 2264 ++------
.../CodeGen/AMDGPU/amdgcn.bitcast.288bit.ll | 313 +-
.../CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll | 1804 +-----
.../CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll | 976 +---
.../CodeGen/AMDGPU/amdgcn.bitcast.352bit.ll | 323 +-
.../CodeGen/AMDGPU/amdgcn.bitcast.384bit.ll | 764 +--
.../CodeGen/AMDGPU/amdgcn.bitcast.448bit.ll | 790 +--
.../CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll | 167 +-
.../CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll | 3190 ++---------
.../CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll | 1610 ++----
.../CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll | 1684 ++----
.../CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll | 1531 ++---
.../CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll | 1758 ++----
.../CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll | 1832 ++----
.../CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll | 1906 ++-----
.../CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll | 1884 ++-----
.../CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll | 1956 ++-----
.../CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll | 977 +---
llvm/test/CodeGen/AMDGPU/bug-vopc-commute.ll | 18 +-
.../test/CodeGen/AMDGPU/carryout-selection.ll | 48 +-
llvm/test/CodeGen/AMDGPU/ctpop16.ll | 10 +-
llvm/test/CodeGen/AMDGPU/ctpop64.ll | 10 +-
.../divergent-branch-uniform-condition.ll | 13 +-
.../test/CodeGen/AMDGPU/dynamic_stackalloc.ll | 8 +-
.../CodeGen/AMDGPU/extract-subvector-16bit.ll | 90 +-
.../test/CodeGen/AMDGPU/flat-saddr-atomics.ll | 42 +-
.../CodeGen/AMDGPU/indirect-addressing-si.ll | 124 +-
llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll | 8 +-
.../CodeGen/AMDGPU/memcpy-crash-issue63986.ll | 100 +-
llvm/test/CodeGen/AMDGPU/mul.ll | 125 +-
llvm/test/CodeGen/AMDGPU/redundant-block.mir | 51 +
...hort-exec-branch-on-unconditional-jump.mir | 19 +-
llvm/test/CodeGen/AMDGPU/set-wave-priority.ll | 9 +-
llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll | 10 +-
llvm/test/CodeGen/AMDGPU/skip-if-dead.ll | 60 +-
llvm/test/CodeGen/AMDGPU/srem.ll | 183 +-
.../transform-block-with-return-to-epilog.ll | 14 +-
llvm/test/CodeGen/AMDGPU/wave32.ll | 48 +-
llvm/test/CodeGen/AMDGPU/xor.ll | 10 +-
48 files changed, 6979 insertions(+), 28171 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/redundant-block.mir
diff --git a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
index 411c14fef63b6..de37ccdb23fac 100644
--- a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
@@ -21,6 +21,7 @@
#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
+#include "SIMachineFunctionInfo.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -40,6 +41,7 @@ class SIPreEmitPeephole {
const SIRegisterInfo *TRI = nullptr;
MachineLoopInfo *MLI = nullptr;
+  bool removeUnconditionalBranchBlocks(MachineFunction &MF);
bool optimizeVccBranch(MachineInstr &MI) const;
void updateMLIBeforeRemovingEdge(MachineBasicBlock *From,
MachineBasicBlock *To) const;
@@ -763,6 +765,72 @@ MachineInstrBuilder SIPreEmitPeephole::createUnpackedMI(MachineInstr &I,
return NewMI;
}
+// Remove blocks that only have an unconditional branch.
+bool SIPreEmitPeephole::removeUnconditionalBranchBlocks(MachineFunction &MF) {
+  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
+  SmallVector<MachineBasicBlock *, 2> ToRemoveMBB;
+  bool Changed = false;
+
+  for (MachineBasicBlock &MBB : MF) {
+    // Cannot remove the entry block or blocks without exactly one successor.
+    if (&MBB == &MF.front() || MBB.succ_size() != 1)
+      continue;
+    MachineBasicBlock *Succ = *MBB.succ_begin();
+    // Cannot remove self-loops.
+    if (Succ == &MBB)
+      continue;
+
+    // Find blocks whose only non-meta instruction is an unconditional branch.
+    const MachineInstr *FirstMI = nullptr;
+    for (const MachineInstr &MI : MBB) {
+      if (MI.isMetaInstruction())
+        continue;
+      FirstMI = &MI;
+      break;
+    }
+    if (FirstMI && FirstMI->isUnconditionalBranch()) {
+      MachineBasicBlock *FallThrough = nullptr;
+      SmallVector<MachineBasicBlock *, 2> PredecessorInRange;
+      for (MachineBasicBlock *Pred : MBB.predecessors()) {
+        // Ensure that re-directing a branch in Pred from MBB to Succ will
+        // not create a long branch. If a register has been reserved for long
+        // branching and Pred is too many basic blocks away from Succ, a long
+        // branch may be created. Using the block difference is a crude but
+        // cheap estimate for the distance. A very conservative limit has
+        // been chosen.
+        constexpr unsigned BlockNumDiffLimit = 20;
+        if (!FuncInfo->getLongBranchReservedReg() ||
+            std::abs(Pred->getNumber() - Succ->getNumber()) <
+                BlockNumDiffLimit) {
+          if (is_contained(PredecessorInRange, Pred))
+            continue;
+          PredecessorInRange.push_back(Pred);
+          if (Pred->getFallThrough(false) == &MBB)
+            FallThrough = Pred;
+        }
+      }
+      for (MachineBasicBlock *Pred : PredecessorInRange) {
+        Changed = true;
+        Pred->ReplaceUsesOfBlockWith(&MBB, Succ);
+      }
+      if (MBB.pred_empty()) {
+        MBB.removeSuccessor(Succ);
+        MBB.clear();
+        ToRemoveMBB.push_back(&MBB);
+      }
+      if (FallThrough && !FallThrough->isLayoutSuccessor(Succ))
+        BuildMI(*FallThrough, FallThrough->end(),
+                FallThrough->findBranchDebugLoc(), TII->get(AMDGPU::S_BRANCH))
+            .addMBB(Succ);
+    }
+  }
+
+  for (MachineBasicBlock *MBB : ToRemoveMBB)
+    MBB->eraseFromParent();
+
+  return Changed;
+}
+
PreservedAnalyses
llvm::SIPreEmitPeepholePass::run(MachineFunction &MF,
MachineFunctionAnalysisManager &MFAM) {
@@ -842,6 +910,12 @@ bool SIPreEmitPeephole::run(MachineFunction &MF, MachineLoopInfo *LoopInfo) {
}
}
+  // Branch optimizations in optimizeVccBranch can create blocks that contain
+  // only an unconditional branch. Such blocks may also have existed before
+  // this point, so run the cleanup unconditionally.
+  if (removeUnconditionalBranchBlocks(MF))
+    Changed = true;
+
// TODO: Fold this into previous block, if possible. Evaluate and handle any
// side effects.
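
Two details of the new function are worth calling out. First, when a
long-branch register has been reserved, a predecessor is only re-directed
if its block number is within BlockNumDiffLimit of the target's; the
block-number difference is a crude but cheap stand-in for code distance,
hence the conservative limit of 20. Second, a predecessor that previously
fell through into the removed block may need an explicit branch. A
hand-written sketch (illustrative block numbers, not from the included
tests):

  bb.0:
    S_CBRANCH_SCC1 %bb.2, implicit $scc
    ; falls through to bb.1

  bb.1:
    S_BRANCH %bb.3

Once bb.0 is re-directed to bb.3 and bb.1 is erased, bb.0 would otherwise
fall through into bb.2, so an S_BRANCH %bb.3 is appended to bb.0 unless
bb.3 is already bb.0's layout successor.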
diff --git a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
index 3dac24ed89fa0..d5fa9777da4c0 100644
--- a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
+++ b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
@@ -17452,26 +17452,23 @@ define void @flat_atomic_fadd_f32_saddr_ret_a_a(ptr inreg %ptr) #0 {
; GFX90A-NEXT: ;;#ASMEND
; GFX90A-NEXT: s_andn2_b64 vcc, exec, s[6:7]
; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0
-; GFX90A-NEXT: s_cbranch_vccz .LBB223_3
+; GFX90A-NEXT: s_cbranch_vccz .LBB223_6
; GFX90A-NEXT: ; %bb.1: ; %atomicrmw.check.private
; GFX90A-NEXT: s_mov_b64 s[6:7], src_private_base
; GFX90A-NEXT: s_cmp_eq_u32 s5, s7
; GFX90A-NEXT: s_cselect_b64 s[6:7], -1, 0
; GFX90A-NEXT: s_andn2_b64 vcc, exec, s[6:7]
-; GFX90A-NEXT: s_cbranch_vccz .LBB223_4
+; GFX90A-NEXT: s_cbranch_vccz .LBB223_3
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.global
; GFX90A-NEXT: v_mov_b32_e32 v1, 0
; GFX90A-NEXT: global_atomic_add_f32 v1, v1, v0, s[4:5] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_accvgpr_write_b32 a0, v1
-; GFX90A-NEXT: s_cbranch_execz .LBB223_5
-; GFX90A-NEXT: s_branch .LBB223_6
+; GFX90A-NEXT: s_cbranch_execz .LBB223_4
+; GFX90A-NEXT: s_branch .LBB223_5
; GFX90A-NEXT: .LBB223_3:
; GFX90A-NEXT: ; implicit-def: $agpr0
-; GFX90A-NEXT: s_branch .LBB223_7
-; GFX90A-NEXT: .LBB223_4:
-; GFX90A-NEXT: ; implicit-def: $agpr0
-; GFX90A-NEXT: .LBB223_5: ; %atomicrmw.private
+; GFX90A-NEXT: .LBB223_4: ; %atomicrmw.private
; GFX90A-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX90A-NEXT: s_cselect_b32 s6, s4, -1
; GFX90A-NEXT: v_mov_b32_e32 v1, s6
@@ -17480,16 +17477,16 @@ define void @flat_atomic_fadd_f32_saddr_ret_a_a(ptr inreg %ptr) #0 {
; GFX90A-NEXT: v_add_f32_e32 v3, v2, v0
; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2
; GFX90A-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen
-; GFX90A-NEXT: .LBB223_6: ; %Flow1
-; GFX90A-NEXT: s_cbranch_execnz .LBB223_8
-; GFX90A-NEXT: .LBB223_7: ; %atomicrmw.shared
+; GFX90A-NEXT: .LBB223_5: ; %Flow1
+; GFX90A-NEXT: s_cbranch_execnz .LBB223_7
+; GFX90A-NEXT: .LBB223_6: ; %atomicrmw.shared
; GFX90A-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX90A-NEXT: s_cselect_b32 s4, s4, -1
; GFX90A-NEXT: v_mov_b32_e32 v1, s4
; GFX90A-NEXT: ds_add_rtn_f32 v0, v1, v0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0
-; GFX90A-NEXT: .LBB223_8: ; %atomicrmw.end
+; GFX90A-NEXT: .LBB223_7: ; %atomicrmw.end
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; use a0
; GFX90A-NEXT: ;;#ASMEND
@@ -17533,24 +17530,21 @@ define void @flat_atomic_fadd_f32_saddr_ret_av_av(ptr inreg %ptr) #0 {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v0
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: s_cbranch_vccz .LBB224_3
+; GFX90A-NEXT: s_cbranch_vccz .LBB224_6
; GFX90A-NEXT: ; %bb.1: ; %atomicrmw.check.private
; GFX90A-NEXT: s_mov_b64 s[6:7], src_private_base
; GFX90A-NEXT: s_cmp_eq_u32 s5, s7
; GFX90A-NEXT: s_cselect_b64 s[6:7], -1, 0
; GFX90A-NEXT: s_andn2_b64 vcc, exec, s[6:7]
-; GFX90A-NEXT: s_cbranch_vccz .LBB224_4
+; GFX90A-NEXT: s_cbranch_vccz .LBB224_3
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.global
; GFX90A-NEXT: v_mov_b32_e32 v1, 0
; GFX90A-NEXT: global_atomic_add_f32 v1, v1, v0, s[4:5] glc
-; GFX90A-NEXT: s_cbranch_execz .LBB224_5
-; GFX90A-NEXT: s_branch .LBB224_6
+; GFX90A-NEXT: s_cbranch_execz .LBB224_4
+; GFX90A-NEXT: s_branch .LBB224_5
; GFX90A-NEXT: .LBB224_3:
; GFX90A-NEXT: ; implicit-def: $vgpr1
-; GFX90A-NEXT: s_branch .LBB224_7
-; GFX90A-NEXT: .LBB224_4:
-; GFX90A-NEXT: ; implicit-def: $vgpr1
-; GFX90A-NEXT: .LBB224_5: ; %atomicrmw.private
+; GFX90A-NEXT: .LBB224_4: ; %atomicrmw.private
; GFX90A-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX90A-NEXT: s_cselect_b32 s6, s4, -1
; GFX90A-NEXT: v_mov_b32_e32 v2, s6
@@ -17558,16 +17552,16 @@ define void @flat_atomic_fadd_f32_saddr_ret_av_av(ptr inreg %ptr) #0 {
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_add_f32_e32 v3, v1, v0
; GFX90A-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen
-; GFX90A-NEXT: .LBB224_6: ; %Flow1
-; GFX90A-NEXT: s_cbranch_execnz .LBB224_8
-; GFX90A-NEXT: .LBB224_7: ; %atomicrmw.shared
+; GFX90A-NEXT: .LBB224_5: ; %Flow1
+; GFX90A-NEXT: s_cbranch_execnz .LBB224_7
+; GFX90A-NEXT: .LBB224_6: ; %atomicrmw.shared
; GFX90A-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX90A-NEXT: s_cselect_b32 s4, s4, -1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s4
; GFX90A-NEXT: ds_add_rtn_f32 v1, v1, v0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB224_8: ; %atomicrmw.end
+; GFX90A-NEXT: .LBB224_7: ; %atomicrmw.end
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; use v1
@@ -18296,27 +18290,24 @@ define void @flat_atomic_fadd_f64_saddr_ret_a_a(ptr inreg %ptr) #0 {
; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0
; GFX90A-NEXT: s_andn2_b64 vcc, exec, s[6:7]
; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1
-; GFX90A-NEXT: s_cbranch_vccz .LBB235_3
+; GFX90A-NEXT: s_cbranch_vccz .LBB235_6
; GFX90A-NEXT: ; %bb.1: ; %atomicrmw.check.private
; GFX90A-NEXT: s_mov_b64 s[6:7], src_private_base
; GFX90A-NEXT: s_cmp_eq_u32 s5, s7
; GFX90A-NEXT: s_cselect_b64 s[6:7], -1, 0
; GFX90A-NEXT: s_andn2_b64 vcc, exec, s[6:7]
-; GFX90A-NEXT: s_cbranch_vccz .LBB235_4
+; GFX90A-NEXT: s_cbranch_vccz .LBB235_3
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.global
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: global_atomic_add_f64 v[2:3], v2, v[0:1], s[4:5] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2
; GFX90A-NEXT: v_accvgpr_write_b32 a1, v3
-; GFX90A-NEXT: s_cbranch_execz .LBB235_5
-; GFX90A-NEXT: s_branch .LBB235_6
+; GFX90A-NEXT: s_cbranch_execz .LBB235_4
+; GFX90A-NEXT: s_branch .LBB235_5
; GFX90A-NEXT: .LBB235_3:
; GFX90A-NEXT: ; implicit-def: $agpr0_agpr1
-; GFX90A-NEXT: s_branch .LBB235_7
-; GFX90A-NEXT: .LBB235_4:
-; GFX90A-NEXT: ; implicit-def: $agpr0_agpr1
-; GFX90A-NEXT: .LBB235_5: ; %atomicrmw.private
+; GFX90A-NEXT: .LBB235_4: ; %atomicrmw.private
; GFX90A-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX90A-NEXT: s_cselect_b32 s6, s4, -1
; GFX90A-NEXT: v_mov_b32_e32 v6, s6
@@ -18328,9 +18319,9 @@ define void @flat_atomic_fadd_f64_saddr_ret_a_a(ptr inreg %ptr) #0 {
; GFX90A-NEXT: v_accvgpr_write_b32 a1, v3
; GFX90A-NEXT: buffer_store_dword v4, v6, s[0:3], 0 offen
; GFX90A-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen offset:4
-; GFX90A-NEXT: .LBB235_6: ; %Flow1
-; GFX90A-NEXT: s_cbranch_execnz .LBB235_8
-; GFX90A-NEXT: .LBB235_7: ; %atomicrmw.shared
+; GFX90A-NEXT: .LBB235_5: ; %Flow1
+; GFX90A-NEXT: s_cbranch_execnz .LBB235_7
+; GFX90A-NEXT: .LBB235_6: ; %atomicrmw.shared
; GFX90A-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX90A-NEXT: s_cselect_b32 s4, s4, -1
; GFX90A-NEXT: v_mov_b32_e32 v2, s4
@@ -18338,7 +18329,7 @@ define void @flat_atomic_fadd_f64_saddr_ret_a_a(ptr inreg %ptr) #0 {
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0
; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1
-; GFX90A-NEXT: .LBB235_8: ; %atomicrmw.end
+; GFX90A-NEXT: .LBB235_7: ; %atomicrmw.end
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; use a[0:1]
; GFX90A-NEXT: ;;#ASMEND
@@ -18359,27 +18350,24 @@ define void @flat_atomic_fadd_f64_saddr_ret_a_a(ptr inreg %ptr) #0 {
; GFX950-NEXT: v_accvgpr_read_b32 v0, a0
; GFX950-NEXT: s_andn2_b64 vcc, exec, s[2:3]
; GFX950-NEXT: v_accvgpr_read_b32 v1, a1
-; GFX950-NEXT: s_cbranch_vccz .LBB235_3
+; GFX950-NEXT: s_cbranch_vccz .LBB235_6
; GFX950-NEXT: ; %bb.1: ; %atomicrmw.check.private
; GFX950-NEXT: s_mov_b64 s[2:3], src_private_base
; GFX950-NEXT: s_cmp_eq_u32 s1, s3
; GFX950-NEXT: s_cselect_b64 s[2:3], -1, 0
; GFX950-NEXT: s_andn2_b64 vcc, exec, s[2:3]
-; GFX950-NEXT: s_cbranch_vccz .LBB235_4
+; GFX950-NEXT: s_cbranch_vccz .LBB235_3
; GFX950-NEXT: ; %bb.2: ; %atomicrmw.global
; GFX950-NEXT: v_mov_b32_e32 v2, 0
; GFX950-NEXT: global_atomic_add_f64 v[2:3], v2, v[0:1], s[0:1] sc0
; GFX950-NEXT: s_waitcnt vmcnt(0)
; GFX950-NEXT: v_accvgpr_write_b32 a0, v2
; GFX950-NEXT: v_accvgpr_write_b32 a1, v3
-; GFX950-NEXT: s_cbranch_execz .LBB235_5
-; GFX950-NEXT: s_branch .LBB235_6
+; GFX950-NEXT: s_cbranch_execz .LBB235_4
+; GFX950-NEXT: s_branch .LBB235_5
; GFX950-NEXT: .LBB235_3:
; GFX950-NEXT: ; implicit-def: $agpr0_agpr1
-; GFX950-NEXT: s_branch .LBB235_7
-; GFX950-NEXT: .LBB235_4:
-; GFX950-NEXT: ; implicit-def: $agpr0_agpr1
-; GFX950-NEXT: .LBB235_5: ; %atomicrmw.private
+; GFX950-NEXT: .LBB235_4: ; %atomicrmw.private
; GFX950-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX950-NEXT: s_cselect_b32 s2, s0, -1
; GFX950-NEXT: scratch_load_dwordx2 v[2:3], off, s2
@@ -18388,9 +18376,9 @@ define void @flat_atomic_fadd_f64_saddr_ret_a_a(ptr inreg %ptr) #0 {
; GFX950-NEXT: v_add_f64 v[4:5], v[2:3], v[0:1]
; GFX950-NEXT: v_accvgpr_write_b32 a1, v3
; GFX950-NEXT: scratch_store_dwordx2 off, v[4:5], s2
-; GFX950-NEXT: .LBB235_6: ; %Flow1
-; GFX950-NEXT: s_cbranch_execnz .LBB235_8
-; GFX950-NEXT: .LBB235_7: ; %atomicrmw.shared
+; GFX950-NEXT: .LBB235_5: ; %Flow1
+; GFX950-NEXT: s_cbranch_execnz .LBB235_7
+; GFX950-NEXT: .LBB235_6: ; %atomicrmw.shared
; GFX950-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX950-NEXT: s_cselect_b32 s0, s0, -1
; GFX950-NEXT: v_mov_b32_e32 v2, s0
@@ -18398,7 +18386,7 @@ define void @flat_atomic_fadd_f64_saddr_ret_a_a(ptr inreg %ptr) #0 {
; GFX950-NEXT: s_waitcnt lgkmcnt(0)
; GFX950-NEXT: v_accvgpr_write_b32 a0, v0
; GFX950-NEXT: v_accvgpr_write_b32 a1, v1
-; GFX950-NEXT: .LBB235_8: ; %atomicrmw.end
+; GFX950-NEXT: .LBB235_7: ; %atomicrmw.end
; GFX950-NEXT: ;;#ASMSTART
; GFX950-NEXT: ; use a[0:1]
; GFX950-NEXT: ;;#ASMEND
@@ -18424,24 +18412,21 @@ define void @flat_atomic_fadd_f64_saddr_ret_av_av(ptr inreg %ptr) #0 {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:1]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: s_cbranch_vccz .LBB236_3
+; GFX90A-NEXT: s_cbranch_vccz .LBB236_6
; GFX90A-NEXT: ; %bb.1: ; %atomicrmw.check.private
; GFX90A-NEXT: s_mov_b64 s[6:7], src_private_base
; GFX90A-NEXT: s_cmp_eq_u32 s5, s7
; GFX90A-NEXT: s_cselect_b64 s[6:7], -1, 0
; GFX90A-NEXT: s_andn2_b64 vcc, exec, s[6:7]
-; GFX90A-NEXT: s_cbranch_vccz .LBB236_4
+; GFX90A-NEXT: s_cbranch_vccz .LBB236_3
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.global
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: global_atomic_add_f64 v[2:3], v2, v[0:1], s[4:5] glc
-; GFX90A-NEXT: s_cbranch_execz .LBB236_5
-; GFX90A-NEXT: s_branch .LBB236_6
+; GFX90A-NEXT: s_cbranch_execz .LBB236_4
+; GFX90A-NEXT: s_branch .LBB236_5
; GFX90A-NEXT: .LBB236_3:
; GFX90A-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX90A-NEXT: s_branch .LBB236_7
-; GFX90A-NEXT: .LBB236_4:
-; GFX90A-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX90A-NEXT: .LBB236_5: ; %atomicrmw.private
+; GFX90A-NEXT: .LBB236_4: ; %atomicrmw.private
; GFX90A-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX90A-NEXT: s_cselect_b32 s6, s4, -1
; GFX90A-NEXT: v_mov_b32_e32 v6, s6
@@ -18451,16 +18436,16 @@ define void @flat_atomic_fadd_f64_saddr_ret_av_av(ptr inreg %ptr) #0 {
; GFX90A-NEXT: v_add_f64 v[4:5], v[2:3], v[0:1]
; GFX90A-NEXT: buffer_store_dword v4, v6, s[0:3], 0 offen
; GFX90A-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen offset:4
-; GFX90A-NEXT: .LBB236_6: ; %Flow1
-; GFX90A-NEXT: s_cbranch_execnz .LBB236_8
-; GFX90A-NEXT: .LBB236_7: ; %atomicrmw.shared
+; GFX90A-NEXT: .LBB236_5: ; %Flow1
+; GFX90A-NEXT: s_cbranch_execnz .LBB236_7
+; GFX90A-NEXT: .LBB236_6: ; %atomicrmw.shared
; GFX90A-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX90A-NEXT: s_cselect_b32 s4, s4, -1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, s4
; GFX90A-NEXT: ds_add_rtn_f64 v[2:3], v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB236_8: ; %atomicrmw.end
+; GFX90A-NEXT: .LBB236_7: ; %atomicrmw.end
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; use v[2:3]
@@ -18479,40 +18464,37 @@ define void @flat_atomic_fadd_f64_saddr_ret_av_av(ptr inreg %ptr) #0 {
; GFX950-NEXT: ;;#ASMSTART
; GFX950-NEXT: ; def v[0:1]
; GFX950-NEXT: ;;#ASMEND
-; GFX950-NEXT: s_cbranch_vccz .LBB236_3
+; GFX950-NEXT: s_cbranch_vccz .LBB236_6
; GFX950-NEXT: ; %bb.1: ; %atomicrmw.check.private
; GFX950-NEXT: s_mov_b64 s[2:3], src_private_base
; GFX950-NEXT: s_cmp_eq_u32 s1, s3
; GFX950-NEXT: s_cselect_b64 s[2:3], -1, 0
; GFX950-NEXT: s_andn2_b64 vcc, exec, s[2:3]
-; GFX950-NEXT: s_cbranch_vccz .LBB236_4
+; GFX950-NEXT: s_cbranch_vccz .LBB236_3
; GFX950-NEXT: ; %bb.2: ; %atomicrmw.global
; GFX950-NEXT: v_mov_b32_e32 v2, 0
; GFX950-NEXT: global_atomic_add_f64 v[2:3], v2, v[0:1], s[0:1] sc0
-; GFX950-NEXT: s_cbranch_execz .LBB236_5
-; GFX950-NEXT: s_branch .LBB236_6
+; GFX950-NEXT: s_cbranch_execz .LBB236_4
+; GFX950-NEXT: s_branch .LBB236_5
; GFX950-NEXT: .LBB236_3:
; GFX950-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX950-NEXT: s_branch .LBB236_7
-; GFX950-NEXT: .LBB236_4:
-; GFX950-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX950-NEXT: .LBB236_5: ; %atomicrmw.private
+; GFX950-NEXT: .LBB236_4: ; %atomicrmw.private
; GFX950-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX950-NEXT: s_cselect_b32 s2, s0, -1
; GFX950-NEXT: scratch_load_dwordx2 v[2:3], off, s2
; GFX950-NEXT: s_waitcnt vmcnt(0)
; GFX950-NEXT: v_add_f64 v[4:5], v[2:3], v[0:1]
; GFX950-NEXT: scratch_store_dwordx2 off, v[4:5], s2
-; GFX950-NEXT: .LBB236_6: ; %Flow1
-; GFX950-NEXT: s_cbranch_execnz .LBB236_8
-; GFX950-NEXT: .LBB236_7: ; %atomicrmw.shared
+; GFX950-NEXT: .LBB236_5: ; %Flow1
+; GFX950-NEXT: s_cbranch_execnz .LBB236_7
+; GFX950-NEXT: .LBB236_6: ; %atomicrmw.shared
; GFX950-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX950-NEXT: s_cselect_b32 s0, s0, -1
; GFX950-NEXT: s_waitcnt vmcnt(0)
; GFX950-NEXT: v_mov_b32_e32 v2, s0
; GFX950-NEXT: ds_add_rtn_f64 v[2:3], v2, v[0:1]
; GFX950-NEXT: s_waitcnt lgkmcnt(0)
-; GFX950-NEXT: .LBB236_8: ; %atomicrmw.end
+; GFX950-NEXT: .LBB236_7: ; %atomicrmw.end
; GFX950-NEXT: s_waitcnt vmcnt(0)
; GFX950-NEXT: ;;#ASMSTART
; GFX950-NEXT: ; use v[2:3]
diff --git a/llvm/test/CodeGen/AMDGPU/add.ll b/llvm/test/CodeGen/AMDGPU/add.ll
index b8814b64735e6..d6d44b80ba528 100644
--- a/llvm/test/CodeGen/AMDGPU/add.ll
+++ b/llvm/test/CodeGen/AMDGPU/add.ll
@@ -1157,7 +1157,7 @@ define amdgpu_kernel void @add64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: v_cmp_ne_u64_e64 s[10:11], s[4:5], 0
; GFX6-NEXT: s_and_b64 vcc, exec, s[10:11]
-; GFX6-NEXT: s_cbranch_vccz .LBB9_4
+; GFX6-NEXT: s_cbranch_vccz .LBB9_2
; GFX6-NEXT: ; %bb.1: ; %else
; GFX6-NEXT: s_add_u32 s4, s4, s6
; GFX6-NEXT: s_addc_u32 s5, s5, s7
@@ -1173,9 +1173,6 @@ define amdgpu_kernel void @add64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX6-NEXT: v_mov_b32_e32 v1, s5
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
-; GFX6-NEXT: .LBB9_4:
-; GFX6-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX6-NEXT: s_branch .LBB9_2
;
; GFX8-LABEL: add64_in_branch:
; GFX8: ; %bb.0: ; %entry
@@ -1183,7 +1180,7 @@ define amdgpu_kernel void @add64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX8-NEXT: s_mov_b64 s[8:9], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX8-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX8-NEXT: s_cbranch_scc0 .LBB9_2
; GFX8-NEXT: ; %bb.1: ; %else
; GFX8-NEXT: s_add_u32 s4, s4, s6
; GFX8-NEXT: s_addc_u32 s5, s5, s7
@@ -1199,9 +1196,6 @@ define amdgpu_kernel void @add64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX8-NEXT: v_mov_b32_e32 v3, s5
; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
-; GFX8-NEXT: .LBB9_4:
-; GFX8-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX8-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: add64_in_branch:
; GFX9: ; %bb.0: ; %entry
@@ -1209,7 +1203,7 @@ define amdgpu_kernel void @add64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX9-NEXT: s_mov_b64 s[2:3], 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u64 s[12:13], 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %else
; GFX9-NEXT: s_add_u32 s0, s12, s14
; GFX9-NEXT: s_addc_u32 s1, s13, s15
@@ -1224,16 +1218,13 @@ define amdgpu_kernel void @add64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
; GFX9-NEXT: s_endpgm
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: ; implicit-def: $sgpr0_sgpr1
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX10-LABEL: add64_in_branch:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_cmp_lg_u64 s[12:13], 0
-; GFX10-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX10-NEXT: s_cbranch_scc0 .LBB9_2
; GFX10-NEXT: ; %bb.1: ; %else
; GFX10-NEXT: s_add_u32 s0, s12, s14
; GFX10-NEXT: s_addc_u32 s1, s13, s15
@@ -1247,16 +1238,13 @@ define amdgpu_kernel void @add64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
; GFX10-NEXT: s_endpgm
-; GFX10-NEXT: .LBB9_4:
-; GFX10-NEXT: ; implicit-def: $sgpr0_sgpr1
-; GFX10-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: add64_in_branch:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %else
; GFX11-NEXT: s_add_u32 s4, s4, s6
; GFX11-NEXT: s_addc_u32 s5, s5, s7
@@ -1269,16 +1257,13 @@ define amdgpu_kernel void @add64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_endpgm
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX11-NEXT: s_branch .LBB9_2
;
; GFX12-LABEL: add64_in_branch:
; GFX12: ; %bb.0: ; %entry
; GFX12-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX12-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX12-NEXT: s_cbranch_scc0 .LBB9_2
; GFX12-NEXT: ; %bb.1: ; %else
; GFX12-NEXT: s_add_nc_u64 s[4:5], s[4:5], s[6:7]
; GFX12-NEXT: s_cbranch_execnz .LBB9_3
@@ -1290,9 +1275,6 @@ define amdgpu_kernel void @add64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX12-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_endpgm
-; GFX12-NEXT: .LBB9_4:
-; GFX12-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX12-NEXT: s_branch .LBB9_2
entry:
%0 = icmp eq i64 %a, 0
br i1 %0, label %if, label %else
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
index ff31d5e774784..93746b90fb34b 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
@@ -251,7 +251,7 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s46, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s47, v0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -321,8 +321,6 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v30, s7
; SI-NEXT: v_mov_b32_e32 v31, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v32i32_to_v32f32_scalar:
; VI: ; %bb.0:
@@ -347,7 +345,7 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -417,8 +415,6 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v30, s7
; VI-NEXT: v_mov_b32_e32 v31, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v32i32_to_v32f32_scalar:
; GFX9: ; %bb.0:
@@ -443,7 +439,7 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s46, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s47, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -513,8 +509,6 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v30, s7
; GFX9-NEXT: v_mov_b32_e32 v31, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v32i32_to_v32f32_scalar:
; GFX11: ; %bb.0:
@@ -536,7 +530,7 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s41, v0
; GFX11-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-NEXT: s_mov_b32 s42, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -592,8 +586,6 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v28, s7 :: v_dual_mov_b32 v29, s6
; GFX11-NEXT: v_dual_mov_b32 v30, s5 :: v_dual_mov_b32 v31, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -872,9 +864,9 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v31, s67, 1.0
; SI-NEXT: v_add_f32_e64 v30, s66, 1.0
@@ -908,10 +900,8 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB3_5
+; SI-NEXT: s_branch .LBB3_4
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -944,7 +934,7 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB3_5: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: v_readlane_b32 s67, v32, 15
; SI-NEXT: v_readlane_b32 s66, v32, 14
; SI-NEXT: v_readlane_b32 s65, v32, 13
@@ -1023,9 +1013,9 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v31, s67, 1.0
; VI-NEXT: v_add_f32_e64 v30, s66, 1.0
@@ -1059,10 +1049,8 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB3_5
+; VI-NEXT: s_branch .LBB3_4
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -1095,7 +1083,7 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB3_5: ; %end
+; VI-NEXT: .LBB3_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v32, 15
; VI-NEXT: v_readlane_b32 s66, v32, 14
; VI-NEXT: v_readlane_b32 s65, v32, 13
@@ -1174,9 +1162,9 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v31, s67, 1.0
; GFX9-NEXT: v_add_f32_e64 v30, s66, 1.0
@@ -1210,10 +1198,8 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB3_5
+; GFX9-NEXT: s_branch .LBB3_4
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -1246,7 +1232,7 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB3_5: ; %end
+; GFX9-NEXT: .LBB3_4: ; %end
; GFX9-NEXT: v_readlane_b32 s67, v32, 15
; GFX9-NEXT: v_readlane_b32 s66, v32, 14
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
@@ -1326,10 +1312,10 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-NEXT: v_writelane_b32 v32, s67, 15
; GFX11-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v31, s67, 1.0
; GFX11-NEXT: v_add_f32_e64 v30, s66, 1.0
@@ -1363,10 +1349,8 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB3_5
+; GFX11-NEXT: s_branch .LBB3_4
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -1383,7 +1367,7 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB3_5: ; %end
+; GFX11-NEXT: .LBB3_4: ; %end
; GFX11-NEXT: v_readlane_b32 s67, v32, 15
; GFX11-NEXT: v_readlane_b32 s66, v32, 14
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
@@ -1665,7 +1649,7 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s46, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s47, v0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB5_3
; SI-NEXT: .LBB5_2: ; %cmp.true
@@ -1735,8 +1719,6 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v30, s7
; SI-NEXT: v_mov_b32_e32 v31, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v32i32_to_v16i64_scalar:
; VI: ; %bb.0:
@@ -1761,7 +1743,7 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -1831,8 +1813,6 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v30, s7
; VI-NEXT: v_mov_b32_e32 v31, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v32i32_to_v16i64_scalar:
; GFX9: ; %bb.0:
@@ -1857,7 +1837,7 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s46, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s47, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -1927,8 +1907,6 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v30, s7
; GFX9-NEXT: v_mov_b32_e32 v31, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v32i32_to_v16i64_scalar:
; GFX11: ; %bb.0:
@@ -1950,7 +1928,7 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s41, v0
; GFX11-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-NEXT: s_mov_b32 s42, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -2006,8 +1984,6 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v28, s7 :: v_dual_mov_b32 v29, s6
; GFX11-NEXT: v_dual_mov_b32 v30, s5 :: v_dual_mov_b32 v31, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2276,7 +2252,7 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s46, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s47, v0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB7_3
; SI-NEXT: .LBB7_2: ; %cmp.true
@@ -2346,8 +2322,6 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v30, s7
; SI-NEXT: v_mov_b32_e32 v31, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v16i64_to_v32i32_scalar:
; VI: ; %bb.0:
@@ -2372,7 +2346,7 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -2442,8 +2416,6 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v30, s7
; VI-NEXT: v_mov_b32_e32 v31, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v16i64_to_v32i32_scalar:
; GFX9: ; %bb.0:
@@ -2468,7 +2440,7 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s46, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s47, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
@@ -2538,8 +2510,6 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v30, s7
; GFX9-NEXT: v_mov_b32_e32 v31, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v16i64_to_v32i32_scalar:
; GFX11: ; %bb.0:
@@ -2561,7 +2531,7 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s41, v0
; GFX11-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-NEXT: s_mov_b32 s42, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
@@ -2617,8 +2587,6 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v28, s7 :: v_dual_mov_b32 v29, s6
; GFX11-NEXT: v_dual_mov_b32 v30, s5 :: v_dual_mov_b32 v31, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2879,7 +2847,7 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s46, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s47, v0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB9_3
; SI-NEXT: .LBB9_2: ; %cmp.true
@@ -2949,8 +2917,6 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v30, s7
; SI-NEXT: v_mov_b32_e32 v31, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v32i32_to_v16f64_scalar:
; VI: ; %bb.0:
@@ -2975,7 +2941,7 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -3045,8 +3011,6 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v30, s7
; VI-NEXT: v_mov_b32_e32 v31, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v32i32_to_v16f64_scalar:
; GFX9: ; %bb.0:
@@ -3071,7 +3035,7 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s46, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s47, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -3141,8 +3105,6 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v30, s7
; GFX9-NEXT: v_mov_b32_e32 v31, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v32i32_to_v16f64_scalar:
; GFX11: ; %bb.0:
@@ -3164,7 +3126,7 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s41, v0
; GFX11-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-NEXT: s_mov_b32 s42, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -3220,8 +3182,6 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v28, s7 :: v_dual_mov_b32 v29, s6
; GFX11-NEXT: v_dual_mov_b32 v30, s5 :: v_dual_mov_b32 v31, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3452,9 +3412,9 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[30:31], s[66:67], 1.0
; SI-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
@@ -3472,10 +3432,8 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; SI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; SI-NEXT: s_branch .LBB11_5
+; SI-NEXT: s_branch .LBB11_4
; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -3508,7 +3466,7 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB11_5: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: v_readlane_b32 s67, v32, 15
; SI-NEXT: v_readlane_b32 s66, v32, 14
; SI-NEXT: v_readlane_b32 s65, v32, 13
@@ -3587,9 +3545,9 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[30:31], s[66:67], 1.0
; VI-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
@@ -3607,10 +3565,8 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB11_5
+; VI-NEXT: s_branch .LBB11_4
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -3643,7 +3599,7 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB11_5: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v32, 15
; VI-NEXT: v_readlane_b32 s66, v32, 14
; VI-NEXT: v_readlane_b32 s65, v32, 13
@@ -3722,9 +3678,9 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[30:31], s[66:67], 1.0
; GFX9-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
@@ -3742,10 +3698,8 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB11_5
+; GFX9-NEXT: s_branch .LBB11_4
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -3778,7 +3732,7 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB11_5: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: v_readlane_b32 s67, v32, 15
; GFX9-NEXT: v_readlane_b32 s66, v32, 14
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
@@ -3858,10 +3812,10 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-NEXT: v_writelane_b32 v32, s67, 15
; GFX11-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[30:31], s[66:67], 1.0
; GFX11-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
@@ -3879,10 +3833,8 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB11_5
+; GFX11-NEXT: s_branch .LBB11_4
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -3899,7 +3851,7 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB11_5: ; %end
+; GFX11-NEXT: .LBB11_4: ; %end
; GFX11-NEXT: v_readlane_b32 s67, v32, 15
; GFX11-NEXT: v_readlane_b32 s66, v32, 14
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
@@ -10020,7 +9972,7 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s44, v1
; GFX9-NEXT: v_writelane_b32 v29, s99, 35
; GFX9-NEXT: ; implicit-def: $vgpr30 : SGPR spill to VGPR lane
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s46, s5, 24
; GFX9-NEXT: v_writelane_b32 v30, s46, 0
@@ -10666,154 +10618,6 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr81
-; GFX9-NEXT: ; implicit-def: $sgpr53
-; GFX9-NEXT: ; implicit-def: $sgpr83
-; GFX9-NEXT: ; implicit-def: $sgpr50
-; GFX9-NEXT: ; implicit-def: $sgpr52
-; GFX9-NEXT: ; implicit-def: $sgpr69
-; GFX9-NEXT: ; implicit-def: $sgpr54
-; GFX9-NEXT: ; implicit-def: $sgpr68
-; GFX9-NEXT: ; implicit-def: $sgpr38
-; GFX9-NEXT: ; implicit-def: $sgpr48
-; GFX9-NEXT: ; implicit-def: $sgpr70
-; GFX9-NEXT: ; implicit-def: $sgpr39
-; GFX9-NEXT: ; implicit-def: $sgpr80
-; GFX9-NEXT: ; implicit-def: $sgpr99
-; GFX9-NEXT: ; implicit-def: $sgpr49
-; GFX9-NEXT: ; implicit-def: $sgpr64
-; GFX9-NEXT: ; implicit-def: $sgpr97
-; GFX9-NEXT: ; implicit-def: $sgpr66
-; GFX9-NEXT: ; implicit-def: $sgpr87
-; GFX9-NEXT: ; implicit-def: $sgpr98
-; GFX9-NEXT: ; implicit-def: $sgpr55
-; GFX9-NEXT: ; implicit-def: $sgpr86
-; GFX9-NEXT: ; implicit-def: $sgpr65
-; GFX9-NEXT: ; implicit-def: $sgpr84
-; GFX9-NEXT: ; implicit-def: $sgpr82
-; GFX9-NEXT: ; implicit-def: $sgpr67
-; GFX9-NEXT: ; implicit-def: $sgpr51
-; GFX9-NEXT: ; implicit-def: $sgpr71
-; GFX9-NEXT: ; implicit-def: $sgpr96
-; GFX9-NEXT: ; implicit-def: $sgpr85
-; GFX9-NEXT: ; implicit-def: $sgpr36
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v32i32_to_v128i8_scalar:
; GFX11: ; %bb.0:
@@ -10885,7 +10689,7 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX11-NEXT: v_writelane_b32 v24, s85, 29
; GFX11-NEXT: v_writelane_b32 v24, s86, 30
; GFX11-NEXT: v_writelane_b32 v24, s87, 31
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s42, s5, 24
; GFX11-NEXT: s_lshr_b32 vcc_hi, s27, 24
@@ -11455,144 +11259,6 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX11-NEXT: s_mov_b32 exec_lo, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr104
-; GFX11-NEXT: ; implicit-def: $sgpr103
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $sgpr102
-; GFX11-NEXT: ; implicit-def: $sgpr83
-; GFX11-NEXT: ; implicit-def: $sgpr81
-; GFX11-NEXT: ; implicit-def: $sgpr101
-; GFX11-NEXT: ; implicit-def: $sgpr100
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr99
-; GFX11-NEXT: ; implicit-def: $sgpr71
-; GFX11-NEXT: ; implicit-def: $sgpr70
-; GFX11-NEXT: ; implicit-def: $sgpr98
-; GFX11-NEXT: ; implicit-def: $sgpr97
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr96
-; GFX11-NEXT: ; implicit-def: $sgpr68
-; GFX11-NEXT: ; implicit-def: $sgpr67
-; GFX11-NEXT: ; implicit-def: $sgpr87
-; GFX11-NEXT: ; implicit-def: $sgpr86
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr85
-; GFX11-NEXT: ; implicit-def: $sgpr66
-; GFX11-NEXT: ; implicit-def: $sgpr65
-; GFX11-NEXT: ; implicit-def: $sgpr82
-; GFX11-NEXT: ; implicit-def: $sgpr84
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr69
-; GFX11-NEXT: ; implicit-def: $sgpr64
-; GFX11-NEXT: ; implicit-def: $sgpr55
-; GFX11-NEXT: ; implicit-def: $sgpr53
-; GFX11-NEXT: ; implicit-def: $sgpr80
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr51
-; GFX11-NEXT: ; implicit-def: $sgpr52
-; GFX11-NEXT: ; implicit-def: $sgpr50
-; GFX11-NEXT: ; implicit-def: $sgpr49
-; GFX11-NEXT: ; implicit-def: $sgpr54
-; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr48
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr37
-; GFX11-NEXT: ; implicit-def: $sgpr35
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -20434,7 +20100,7 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v31
; GFX11-TRUE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v10, 0xc0c0004
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
@@ -20879,9 +20545,6 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:324
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB15_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB15_2
;
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v32i32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -21007,7 +20670,7 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v31
; GFX11-FAKE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB15_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v10, 0xc0c0004
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(1)
@@ -21452,9 +21115,6 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32 offset:324
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB15_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB15_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -22813,7 +22473,7 @@ define inreg <64 x bfloat> @bitcast_v32i32_to_v64bf16_scalar(<32 x i32> inreg %a
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
@@ -22883,8 +22543,6 @@ define inreg <64 x bfloat> @bitcast_v32i32_to_v64bf16_scalar(<32 x i32> inreg %a
; VI-NEXT: v_mov_b32_e32 v30, s7
; VI-NEXT: v_mov_b32_e32 v31, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v32i32_to_v64bf16_scalar:
; GFX9: ; %bb.0:
@@ -22909,7 +22567,7 @@ define inreg <64 x bfloat> @bitcast_v32i32_to_v64bf16_scalar(<32 x i32> inreg %a
; GFX9-NEXT: v_readfirstlane_b32 s46, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s47, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
@@ -22979,8 +22637,6 @@ define inreg <64 x bfloat> @bitcast_v32i32_to_v64bf16_scalar(<32 x i32> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v30, s7
; GFX9-NEXT: v_mov_b32_e32 v31, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v32i32_to_v64bf16_scalar:
; GFX11: ; %bb.0:
@@ -23002,7 +22658,7 @@ define inreg <64 x bfloat> @bitcast_v32i32_to_v64bf16_scalar(<32 x i32> inreg %a
; GFX11-NEXT: v_readfirstlane_b32 s41, v0
; GFX11-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-NEXT: s_mov_b32 s42, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
@@ -23057,8 +22713,6 @@ define inreg <64 x bfloat> @bitcast_v32i32_to_v64bf16_scalar(<32 x i32> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v28, s7 :: v_dual_mov_b32 v29, s6
; GFX11-NEXT: v_dual_mov_b32 v30, s5 :: v_dual_mov_b32 v31, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -26950,9 +26604,9 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
+; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v32, 0x40c00000
; VI-NEXT: s_lshl_b32 s4, s50, 16
@@ -27546,10 +27200,8 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_cndmask_b32_e32 v31, v32, v34, vcc
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v33
; VI-NEXT: v_lshrrev_b64 v[31:32], 16, v[31:32]
-; VI-NEXT: s_branch .LBB19_5
+; VI-NEXT: s_branch .LBB19_4
; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
-; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -27582,7 +27234,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB19_5: ; %end
+; VI-NEXT: .LBB19_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v35, 15
; VI-NEXT: v_readlane_b32 s66, v35, 14
; VI-NEXT: v_readlane_b32 s65, v35, 13
@@ -27661,9 +27313,9 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s51, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v16, 0x40c00000
@@ -28275,10 +27927,8 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_and_b32_sdwa v16, v32, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v33
; GFX9-NEXT: v_lshl_or_b32 v16, v32, 16, v16
-; GFX9-NEXT: s_branch .LBB19_5
+; GFX9-NEXT: s_branch .LBB19_4
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -28311,7 +27961,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB19_5: ; %end
+; GFX9-NEXT: .LBB19_4: ; %end
; GFX9-NEXT: v_readlane_b32 s67, v36, 15
; GFX9-NEXT: v_readlane_b32 s66, v36, 14
; GFX9-NEXT: v_readlane_b32 s65, v36, 13
@@ -28391,10 +28041,10 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s67, 15
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s0, s51, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s51, 16
@@ -29017,10 +28667,8 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v34
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v33.l
-; GFX11-TRUE16-NEXT: s_branch .LBB19_5
+; GFX11-TRUE16-NEXT: s_branch .LBB19_4
; GFX11-TRUE16-NEXT: .LBB19_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB19_2
-; GFX11-TRUE16-NEXT: .LBB19_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -29037,7 +28685,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-TRUE16-NEXT: .LBB19_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB19_4: ; %end
; GFX11-TRUE16-NEXT: v_readlane_b32 s67, v40, 15
; GFX11-TRUE16-NEXT: v_readlane_b32 s66, v40, 14
; GFX11-TRUE16-NEXT: v_readlane_b32 s65, v40, 13
@@ -29117,10 +28765,10 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s67, 15
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-FAKE16-NEXT: .LBB19_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_and_b32 s1, s51, 0xffff0000
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s51, 16
@@ -29774,10 +29422,8 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v32, 16, v37
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v36, 16, v33
-; GFX11-FAKE16-NEXT: s_branch .LBB19_5
+; GFX11-FAKE16-NEXT: s_branch .LBB19_4
; GFX11-FAKE16-NEXT: .LBB19_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB19_2
-; GFX11-FAKE16-NEXT: .LBB19_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -29794,7 +29440,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-FAKE16-NEXT: .LBB19_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB19_4: ; %end
; GFX11-FAKE16-NEXT: v_readlane_b32 s67, v40, 15
; GFX11-FAKE16-NEXT: v_readlane_b32 s66, v40, 14
; GFX11-FAKE16-NEXT: v_readlane_b32 s65, v40, 13
@@ -30343,7 +29989,7 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; SI-NEXT: s_cmp_lg_u32 s44, 0
; SI-NEXT: v_readfirstlane_b32 s44, v0
; SI-NEXT: v_writelane_b32 v32, s69, 21
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s38, s5, 16
; SI-NEXT: s_lshr_b32 s39, s7, 16
@@ -30599,40 +30245,6 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr69
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr68
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr67
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr66
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr65
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_v32i32_to_v64f16_scalar:
; VI: ; %bb.0:
@@ -30657,7 +30269,7 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
@@ -30727,8 +30339,6 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v30, s7
; VI-NEXT: v_mov_b32_e32 v31, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v32i32_to_v64f16_scalar:
; GFX9: ; %bb.0:
@@ -30753,7 +30363,7 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s46, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s47, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
@@ -30823,8 +30433,6 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v30, s7
; GFX9-NEXT: v_mov_b32_e32 v31, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_v32i32_to_v64f16_scalar:
; GFX11: ; %bb.0:
@@ -30846,7 +30454,7 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s41, v0
; GFX11-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-NEXT: s_mov_b32 s42, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
@@ -30901,8 +30509,6 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v28, s7 :: v_dual_mov_b32 v29, s6
; GFX11-NEXT: v_dual_mov_b32 v30, s5 :: v_dual_mov_b32 v31, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_4:
-; GFX11-NEXT: s_branch .LBB21_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -31982,7 +31588,7 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v35, s65, 2
; SI-NEXT: v_writelane_b32 v35, s64, 3
-; SI-NEXT: s_cbranch_scc0 .LBB23_3
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s13, 16
@@ -32080,7 +31686,7 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, vcc_lo, 16
; SI-NEXT: s_or_b32 s67, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB23_4
+; SI-NEXT: s_cbranch_execnz .LBB23_3
; SI-NEXT: .LBB23_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s13
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -32342,11 +31948,8 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v30, v31, v30
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v32
; SI-NEXT: v_or_b32_e32 v31, v33, v31
-; SI-NEXT: s_branch .LBB23_5
+; SI-NEXT: s_branch .LBB23_4
; SI-NEXT: .LBB23_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB23_2
-; SI-NEXT: .LBB23_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -32379,7 +31982,7 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB23_5: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: v_readlane_b32 s99, v34, 35
; SI-NEXT: v_readlane_b32 s98, v34, 34
; SI-NEXT: v_readlane_b32 s97, v34, 33
@@ -32479,9 +32082,9 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
+; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s51, 16
; VI-NEXT: v_mov_b32_e32 v16, 0x200
@@ -32644,10 +32247,8 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_add_f16_sdwa v32, v32, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v16, s52, v16
; VI-NEXT: v_or_b32_e32 v16, v16, v32
-; VI-NEXT: s_branch .LBB23_5
+; VI-NEXT: s_branch .LBB23_4
; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
-; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -32680,7 +32281,7 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB23_5: ; %end
+; VI-NEXT: .LBB23_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v33, 15
; VI-NEXT: v_readlane_b32 s66, v33, 14
; VI-NEXT: v_readlane_b32 s65, v33, 13
@@ -32759,9 +32360,9 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
+; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v16, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, s51, v16 op_sel_hi:[1,0]
@@ -32796,10 +32397,8 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v18, s54, v16 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v17, s53, v16 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v16, s52, v16 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB23_5
+; GFX9-NEXT: s_branch .LBB23_4
; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
-; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -32832,7 +32431,7 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB23_5: ; %end
+; GFX9-NEXT: .LBB23_4: ; %end
; GFX9-NEXT: v_readlane_b32 s67, v32, 15
; GFX9-NEXT: v_readlane_b32 s66, v32, 14
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
@@ -32912,10 +32511,10 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-NEXT: v_writelane_b32 v32, s67, 15
; GFX11-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
; GFX11-NEXT: .LBB23_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s51 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s50 op_sel_hi:[0,1]
@@ -32949,10 +32548,8 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v18, 0x200, s54 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s53 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v16, 0x200, s52 op_sel_hi:[0,1]
-; GFX11-NEXT: s_branch .LBB23_5
+; GFX11-NEXT: s_branch .LBB23_4
; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
-; GFX11-NEXT: .LBB23_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -32969,7 +32566,7 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB23_5: ; %end
+; GFX11-NEXT: .LBB23_4: ; %end
; GFX11-NEXT: v_readlane_b32 s67, v32, 15
; GFX11-NEXT: v_readlane_b32 s66, v32, 14
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
@@ -33518,7 +33115,7 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3
; SI-NEXT: s_cmp_lg_u32 s44, 0
; SI-NEXT: v_readfirstlane_b32 s44, v0
; SI-NEXT: v_writelane_b32 v32, s69, 21
-; SI-NEXT: s_cbranch_scc0 .LBB25_4
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s38, s5, 16
; SI-NEXT: s_lshr_b32 s39, s7, 16
@@ -33774,40 +33371,6 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB25_4:
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr69
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr68
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr67
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr66
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr65
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: s_branch .LBB25_2
;
; VI-LABEL: bitcast_v32i32_to_v64i16_scalar:
; VI: ; %bb.0:
@@ -33832,7 +33395,7 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB25_4
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB25_3
; VI-NEXT: .LBB25_2: ; %cmp.true
@@ -33902,8 +33465,6 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v30, s7
; VI-NEXT: v_mov_b32_e32 v31, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_4:
-; VI-NEXT: s_branch .LBB25_2
;
; GFX9-LABEL: bitcast_v32i32_to_v64i16_scalar:
; GFX9: ; %bb.0:
@@ -33928,7 +33489,7 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s46, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s47, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB25_3
; GFX9-NEXT: .LBB25_2: ; %cmp.true
@@ -33998,8 +33559,6 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v30, s7
; GFX9-NEXT: v_mov_b32_e32 v31, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_4:
-; GFX9-NEXT: s_branch .LBB25_2
;
; GFX11-LABEL: bitcast_v32i32_to_v64i16_scalar:
; GFX11: ; %bb.0:
@@ -34021,7 +33580,7 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s41, v0
; GFX11-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-NEXT: s_mov_b32 s42, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-NEXT: s_cbranch_vccnz .LBB25_3
@@ -34076,8 +33635,6 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v28, s7 :: v_dual_mov_b32 v29, s6
; GFX11-NEXT: v_dual_mov_b32 v30, s5 :: v_dual_mov_b32 v31, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB25_4:
-; GFX11-NEXT: s_branch .LBB25_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -35361,7 +34918,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB27_3
; VI-NEXT: .LBB27_2: ; %cmp.true
@@ -35559,8 +35116,6 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v30, s7
; VI-NEXT: v_mov_b32_e32 v31, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v64i16_to_v32i32_scalar:
; GFX9: ; %bb.0:
@@ -35618,9 +35173,9 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_4
+; GFX9-NEXT: s_cbranch_execnz .LBB27_3
; GFX9-NEXT: .LBB27_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, s51, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, s50, 3 op_sel_hi:[1,0]
@@ -35654,10 +35209,8 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v18, s54, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v17, s53, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v16, s52, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB27_5
+; GFX9-NEXT: s_branch .LBB27_4
; GFX9-NEXT: .LBB27_3:
-; GFX9-NEXT: s_branch .LBB27_2
-; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -35690,7 +35243,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB27_5: ; %end
+; GFX9-NEXT: .LBB27_4: ; %end
; GFX9-NEXT: v_readlane_b32 s67, v32, 15
; GFX9-NEXT: v_readlane_b32 s66, v32, 14
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
@@ -35770,10 +35323,10 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-NEXT: v_writelane_b32 v32, s67, 15
; GFX11-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_3
; GFX11-NEXT: .LBB27_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s51, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s50, 3 op_sel_hi:[1,0]
@@ -35807,10 +35360,8 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v18, s54, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v17, s53, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v16, s52, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: s_branch .LBB27_5
+; GFX11-NEXT: s_branch .LBB27_4
; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
-; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -35827,7 +35378,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB27_5: ; %end
+; GFX11-NEXT: .LBB27_4: ; %end
; GFX11-NEXT: v_readlane_b32 s67, v32, 15
; GFX11-NEXT: v_readlane_b32 s66, v32, 14
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
@@ -36127,9 +35678,9 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB29_4
+; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v31, s67, 1.0
; SI-NEXT: v_add_f32_e64 v30, s66, 1.0
@@ -36163,10 +35714,8 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB29_5
+; SI-NEXT: s_branch .LBB29_4
; SI-NEXT: .LBB29_3:
-; SI-NEXT: s_branch .LBB29_2
-; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -36199,7 +35748,7 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB29_5: ; %end
+; SI-NEXT: .LBB29_4: ; %end
; SI-NEXT: v_readlane_b32 s67, v32, 15
; SI-NEXT: v_readlane_b32 s66, v32, 14
; SI-NEXT: v_readlane_b32 s65, v32, 13
@@ -36278,9 +35827,9 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
+; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v31, s67, 1.0
; VI-NEXT: v_add_f32_e64 v30, s66, 1.0
@@ -36314,10 +35863,8 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB29_5
+; VI-NEXT: s_branch .LBB29_4
; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
-; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -36350,7 +35897,7 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB29_5: ; %end
+; VI-NEXT: .LBB29_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v32, 15
; VI-NEXT: v_readlane_b32 s66, v32, 14
; VI-NEXT: v_readlane_b32 s65, v32, 13
@@ -36429,9 +35976,9 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
+; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v31, s67, 1.0
; GFX9-NEXT: v_add_f32_e64 v30, s66, 1.0
@@ -36465,10 +36012,8 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB29_5
+; GFX9-NEXT: s_branch .LBB29_4
; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
-; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -36501,7 +36046,7 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB29_5: ; %end
+; GFX9-NEXT: .LBB29_4: ; %end
; GFX9-NEXT: v_readlane_b32 s67, v32, 15
; GFX9-NEXT: v_readlane_b32 s66, v32, 14
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
@@ -36581,10 +36126,10 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-NEXT: v_writelane_b32 v32, s67, 15
; GFX11-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-NEXT: .LBB29_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v31, s67, 1.0
; GFX11-NEXT: v_add_f32_e64 v30, s66, 1.0
@@ -36618,10 +36163,8 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB29_5
+; GFX11-NEXT: s_branch .LBB29_4
; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
-; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -36638,7 +36181,7 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB29_5: ; %end
+; GFX11-NEXT: .LBB29_4: ; %end
; GFX11-NEXT: v_readlane_b32 s67, v32, 15
; GFX11-NEXT: v_readlane_b32 s66, v32, 14
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
@@ -36928,7 +36471,7 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s46, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s47, v0
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB31_3
; SI-NEXT: .LBB31_2: ; %cmp.true
@@ -36998,8 +36541,6 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v30, s7
; SI-NEXT: v_mov_b32_e32 v31, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v16i64_to_v32f32_scalar:
; VI: ; %bb.0:
@@ -37024,7 +36565,7 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB31_3
; VI-NEXT: .LBB31_2: ; %cmp.true
@@ -37094,8 +36635,6 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v30, s7
; VI-NEXT: v_mov_b32_e32 v31, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v16i64_to_v32f32_scalar:
; GFX9: ; %bb.0:
@@ -37120,7 +36659,7 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s46, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s47, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
@@ -37190,8 +36729,6 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v30, s7
; GFX9-NEXT: v_mov_b32_e32 v31, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB31_4:
-; GFX9-NEXT: s_branch .LBB31_2
;
; GFX11-LABEL: bitcast_v16i64_to_v32f32_scalar:
; GFX11: ; %bb.0:
@@ -37213,7 +36750,7 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s41, v0
; GFX11-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-NEXT: s_mov_b32 s42, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
@@ -37269,8 +36806,6 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v28, s7 :: v_dual_mov_b32 v29, s6
; GFX11-NEXT: v_dual_mov_b32 v30, s5 :: v_dual_mov_b32 v31, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB31_4:
-; GFX11-NEXT: s_branch .LBB31_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -37549,9 +37084,9 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB33_4
+; SI-NEXT: s_cbranch_execnz .LBB33_3
; SI-NEXT: .LBB33_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v31, s67, 1.0
; SI-NEXT: v_add_f32_e64 v30, s66, 1.0
@@ -37585,10 +37120,8 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB33_5
+; SI-NEXT: s_branch .LBB33_4
; SI-NEXT: .LBB33_3:
-; SI-NEXT: s_branch .LBB33_2
-; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -37621,7 +37154,7 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB33_5: ; %end
+; SI-NEXT: .LBB33_4: ; %end
; SI-NEXT: v_readlane_b32 s67, v32, 15
; SI-NEXT: v_readlane_b32 s66, v32, 14
; SI-NEXT: v_readlane_b32 s65, v32, 13
@@ -37700,9 +37233,9 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_4
+; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v31, s67, 1.0
; VI-NEXT: v_add_f32_e64 v30, s66, 1.0
@@ -37736,10 +37269,8 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB33_5
+; VI-NEXT: s_branch .LBB33_4
; VI-NEXT: .LBB33_3:
-; VI-NEXT: s_branch .LBB33_2
-; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -37772,7 +37303,7 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB33_5: ; %end
+; VI-NEXT: .LBB33_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v32, 15
; VI-NEXT: v_readlane_b32 s66, v32, 14
; VI-NEXT: v_readlane_b32 s65, v32, 13
@@ -37851,9 +37382,9 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
+; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v31, s67, 1.0
; GFX9-NEXT: v_add_f32_e64 v30, s66, 1.0
@@ -37887,10 +37418,8 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB33_5
+; GFX9-NEXT: s_branch .LBB33_4
; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: s_branch .LBB33_2
-; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -37923,7 +37452,7 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB33_5: ; %end
+; GFX9-NEXT: .LBB33_4: ; %end
; GFX9-NEXT: v_readlane_b32 s67, v32, 15
; GFX9-NEXT: v_readlane_b32 s66, v32, 14
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
@@ -38003,10 +37532,10 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; GFX11-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-NEXT: v_writelane_b32 v32, s67, 15
; GFX11-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v31, s67, 1.0
; GFX11-NEXT: v_add_f32_e64 v30, s66, 1.0
@@ -38040,10 +37569,8 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB33_5
+; GFX11-NEXT: s_branch .LBB33_4
; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
-; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -38060,7 +37587,7 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB33_5: ; %end
+; GFX11-NEXT: .LBB33_4: ; %end
; GFX11-NEXT: v_readlane_b32 s67, v32, 15
; GFX11-NEXT: v_readlane_b32 s66, v32, 14
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
@@ -38312,9 +37839,9 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB35_3
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB35_4
+; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[30:31], s[66:67], 1.0
; SI-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
@@ -38332,10 +37859,8 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; SI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; SI-NEXT: s_branch .LBB35_5
+; SI-NEXT: s_branch .LBB35_4
; SI-NEXT: .LBB35_3:
-; SI-NEXT: s_branch .LBB35_2
-; SI-NEXT: .LBB35_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -38368,7 +37893,7 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB35_5: ; %end
+; SI-NEXT: .LBB35_4: ; %end
; SI-NEXT: v_readlane_b32 s67, v32, 15
; SI-NEXT: v_readlane_b32 s66, v32, 14
; SI-NEXT: v_readlane_b32 s65, v32, 13
@@ -38447,9 +37972,9 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_4
+; VI-NEXT: s_cbranch_execnz .LBB35_3
; VI-NEXT: .LBB35_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[30:31], s[66:67], 1.0
; VI-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
@@ -38467,10 +37992,8 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; VI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB35_5
+; VI-NEXT: s_branch .LBB35_4
; VI-NEXT: .LBB35_3:
-; VI-NEXT: s_branch .LBB35_2
-; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -38503,7 +38026,7 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB35_5: ; %end
+; VI-NEXT: .LBB35_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v32, 15
; VI-NEXT: v_readlane_b32 s66, v32, 14
; VI-NEXT: v_readlane_b32 s65, v32, 13
@@ -38582,9 +38105,9 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
+; GFX9-NEXT: s_cbranch_execnz .LBB35_3
; GFX9-NEXT: .LBB35_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[30:31], s[66:67], 1.0
; GFX9-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
@@ -38602,10 +38125,8 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; GFX9-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB35_5
+; GFX9-NEXT: s_branch .LBB35_4
; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
-; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -38638,7 +38159,7 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB35_5: ; %end
+; GFX9-NEXT: .LBB35_4: ; %end
; GFX9-NEXT: v_readlane_b32 s67, v32, 15
; GFX9-NEXT: v_readlane_b32 s66, v32, 14
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
@@ -38718,10 +38239,10 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; GFX11-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-NEXT: v_writelane_b32 v32, s67, 15
; GFX11-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-NEXT: .LBB35_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[30:31], s[66:67], 1.0
; GFX11-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
@@ -38739,10 +38260,8 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; GFX11-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB35_5
+; GFX11-NEXT: s_branch .LBB35_4
; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
-; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -38759,7 +38278,7 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB35_5: ; %end
+; GFX11-NEXT: .LBB35_4: ; %end
; GFX11-NEXT: v_readlane_b32 s67, v32, 15
; GFX11-NEXT: v_readlane_b32 s66, v32, 14
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
@@ -44111,7 +43630,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane
-; VI-NEXT: s_cbranch_scc0 .LBB37_3
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s46, s5, 24
; VI-NEXT: v_writelane_b32 v62, s46, 57
@@ -44267,7 +43786,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; VI-NEXT: s_lshr_b64 s[36:37], s[20:21], 24
; VI-NEXT: s_lshr_b64 s[38:39], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[48:49], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB37_4
+; VI-NEXT: s_cbranch_execnz .LBB37_3
; VI-NEXT: .LBB37_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v2, s5, 1.0
; VI-NEXT: v_add_f32_e64 v1, s4, 1.0
@@ -44475,164 +43994,8 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v60, 8, v32
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v31
; VI-NEXT: v_lshrrev_b32_e32 v61, 8, v31
-; VI-NEXT: s_branch .LBB37_5
+; VI-NEXT: s_branch .LBB37_4
; VI-NEXT: .LBB37_3:
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr71
-; VI-NEXT: ; implicit-def: $sgpr69
-; VI-NEXT: ; implicit-def: $sgpr70
-; VI-NEXT: ; implicit-def: $sgpr68
-; VI-NEXT: ; implicit-def: $sgpr67
-; VI-NEXT: ; implicit-def: $sgpr66
-; VI-NEXT: ; implicit-def: $sgpr64
-; VI-NEXT: ; implicit-def: $sgpr65
-; VI-NEXT: ; implicit-def: $sgpr55
-; VI-NEXT: ; implicit-def: $sgpr54
-; VI-NEXT: ; implicit-def: $sgpr53
-; VI-NEXT: ; implicit-def: $sgpr51
-; VI-NEXT: ; implicit-def: $sgpr52
-; VI-NEXT: ; implicit-def: $sgpr50
-; VI-NEXT: ; implicit-def: $sgpr87
-; VI-NEXT: ; implicit-def: $sgpr86
-; VI-NEXT: ; implicit-def: $sgpr84
-; VI-NEXT: ; implicit-def: $sgpr85
-; VI-NEXT: ; implicit-def: $sgpr83
-; VI-NEXT: ; implicit-def: $sgpr82
-; VI-NEXT: ; implicit-def: $sgpr81
-; VI-NEXT: ; implicit-def: $sgpr80
-; VI-NEXT: ; implicit-def: $sgpr48
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB37_2
-; VI-NEXT: .LBB37_4:
; VI-NEXT: v_mov_b32_e32 v53, s46
; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
@@ -44897,7 +44260,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v53, s30
; VI-NEXT: v_mov_b32_e32 v54, s34
-; VI-NEXT: .LBB37_5: ; %end
+; VI-NEXT: .LBB37_4: ; %end
; VI-NEXT: s_mov_b32 s4, 0xc0c0004
; VI-NEXT: v_perm_b32 v33, v33, v41, s4
; VI-NEXT: v_perm_b32 v31, v31, v61, s4
@@ -45344,7 +44707,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s46, s5, 24
; GFX9-NEXT: v_writelane_b32 v62, s46, 49
@@ -45492,7 +44855,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX9-NEXT: s_lshr_b64 s[30:31], s[20:21], 24
; GFX9-NEXT: s_lshr_b64 s[34:35], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[36:37], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB37_4
+; GFX9-NEXT: s_cbranch_execnz .LBB37_3
; GFX9-NEXT: .LBB37_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v2, s5, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s4, 1.0
@@ -45709,156 +45072,8 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 8, v49
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v48
; GFX9-NEXT: v_lshrrev_b32_e32 v20, 8, v48
-; GFX9-NEXT: s_branch .LBB37_5
+; GFX9-NEXT: s_branch .LBB37_4
; GFX9-NEXT: .LBB37_3:
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr81
-; GFX9-NEXT: ; implicit-def: $sgpr71
-; GFX9-NEXT: ; implicit-def: $sgpr80
-; GFX9-NEXT: ; implicit-def: $sgpr70
-; GFX9-NEXT: ; implicit-def: $sgpr69
-; GFX9-NEXT: ; implicit-def: $sgpr68
-; GFX9-NEXT: ; implicit-def: $sgpr66
-; GFX9-NEXT: ; implicit-def: $sgpr67
-; GFX9-NEXT: ; implicit-def: $sgpr65
-; GFX9-NEXT: ; implicit-def: $sgpr64
-; GFX9-NEXT: ; implicit-def: $sgpr55
-; GFX9-NEXT: ; implicit-def: $sgpr53
-; GFX9-NEXT: ; implicit-def: $sgpr54
-; GFX9-NEXT: ; implicit-def: $sgpr52
-; GFX9-NEXT: ; implicit-def: $sgpr51
-; GFX9-NEXT: ; implicit-def: $sgpr50
-; GFX9-NEXT: ; implicit-def: $sgpr48
-; GFX9-NEXT: ; implicit-def: $sgpr49
-; GFX9-NEXT: ; implicit-def: $sgpr39
-; GFX9-NEXT: ; implicit-def: $sgpr38
-; GFX9-NEXT: ; implicit-def: $sgpr99
-; GFX9-NEXT: ; implicit-def: $sgpr97
-; GFX9-NEXT: ; implicit-def: $sgpr98
-; GFX9-NEXT: ; implicit-def: $sgpr96
-; GFX9-NEXT: ; implicit-def: $sgpr87
-; GFX9-NEXT: ; implicit-def: $sgpr86
-; GFX9-NEXT: ; implicit-def: $sgpr84
-; GFX9-NEXT: ; implicit-def: $sgpr85
-; GFX9-NEXT: ; implicit-def: $sgpr83
-; GFX9-NEXT: ; implicit-def: $sgpr82
-; GFX9-NEXT: ; implicit-def: $sgpr36
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB37_2
-; GFX9-NEXT: .LBB37_4:
; GFX9-NEXT: v_mov_b32_e32 v53, s46
; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
@@ -46124,7 +45339,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v53, s90
; GFX9-NEXT: v_mov_b32_e32 v54, s92
-; GFX9-NEXT: .LBB37_5: ; %end
+; GFX9-NEXT: .LBB37_4: ; %end
; GFX9-NEXT: s_mov_b32 s4, 0xc0c0004
; GFX9-NEXT: v_perm_b32 v32, v32, v15, s4
; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload
@@ -46560,7 +45775,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_writelane_b32 v74, s85, 29
; GFX11-NEXT: v_writelane_b32 v74, s86, 30
; GFX11-NEXT: v_writelane_b32 v74, s87, 31
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s42, s5, 24
; GFX11-NEXT: s_lshr_b32 vcc_hi, s27, 24
@@ -46709,7 +45924,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_writelane_b32 v76, s42, 0
; GFX11-NEXT: s_lshr_b64 s[42:43], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_lo
-; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
; GFX11-NEXT: .LBB37_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v12, s15, 1.0
; GFX11-NEXT: v_add_f32_e64 v11, s14, 1.0
@@ -46839,146 +46054,8 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v63, 8, v34
; GFX11-NEXT: v_lshrrev_b32_e32 v73, 16, v33
; GFX11-NEXT: v_lshrrev_b32_e32 v72, 8, v33
-; GFX11-NEXT: s_branch .LBB37_5
+; GFX11-NEXT: s_branch .LBB37_4
; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr104
-; GFX11-NEXT: ; implicit-def: $sgpr102
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr103
-; GFX11-NEXT: ; implicit-def: $sgpr101
-; GFX11-NEXT: ; implicit-def: $sgpr100
-; GFX11-NEXT: ; implicit-def: $sgpr99
-; GFX11-NEXT: ; implicit-def: $sgpr97
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr98
-; GFX11-NEXT: ; implicit-def: $sgpr96
-; GFX11-NEXT: ; implicit-def: $sgpr87
-; GFX11-NEXT: ; implicit-def: $sgpr86
-; GFX11-NEXT: ; implicit-def: $sgpr84
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr85
-; GFX11-NEXT: ; implicit-def: $sgpr83
-; GFX11-NEXT: ; implicit-def: $sgpr82
-; GFX11-NEXT: ; implicit-def: $sgpr81
-; GFX11-NEXT: ; implicit-def: $sgpr71
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr80
-; GFX11-NEXT: ; implicit-def: $sgpr70
-; GFX11-NEXT: ; implicit-def: $sgpr69
-; GFX11-NEXT: ; implicit-def: $sgpr68
-; GFX11-NEXT: ; implicit-def: $sgpr66
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr67
-; GFX11-NEXT: ; implicit-def: $sgpr65
-; GFX11-NEXT: ; implicit-def: $sgpr64
-; GFX11-NEXT: ; implicit-def: $sgpr55
-; GFX11-NEXT: ; implicit-def: $sgpr53
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr54
-; GFX11-NEXT: ; implicit-def: $sgpr52
-; GFX11-NEXT: ; implicit-def: $sgpr51
-; GFX11-NEXT: ; implicit-def: $sgpr50
-; GFX11-NEXT: ; implicit-def: $sgpr48
-; GFX11-NEXT: ; implicit-def: $sgpr49
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr37
-; GFX11-NEXT: ; implicit-def: $sgpr35
-; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: s_branch .LBB37_2
-; GFX11-NEXT: .LBB37_4:
; GFX11-NEXT: v_dual_mov_b32 v33, s0 :: v_dual_mov_b32 v34, s1
; GFX11-NEXT: v_readlane_b32 s0, v76, 0
; GFX11-NEXT: v_dual_mov_b32 v31, s2 :: v_dual_mov_b32 v32, s3
@@ -47124,7 +46201,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_mov_b32_e32 v69, s0
; GFX11-NEXT: v_readlane_b32 s0, v77, 7
; GFX11-NEXT: v_mov_b32_e32 v70, s0
-; GFX11-NEXT: .LBB37_5: ; %end
+; GFX11-NEXT: .LBB37_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_perm_b32 v67, v73, v67, 0xc0c0004
; GFX11-NEXT: v_perm_b32 v66, v60, v66, 0xc0c0004
@@ -56174,7 +55251,7 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v31
; GFX11-TRUE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB39_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v10, 0xc0c0004
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
@@ -56619,9 +55696,6 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:324
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB39_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB39_2
;
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v32f32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -56747,7 +55821,7 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v31
; GFX11-FAKE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB39_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v10, 0xc0c0004
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(1)
@@ -57192,9 +56266,6 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32 offset:324
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB39_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB39_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -58075,7 +57146,7 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane
-; SI-NEXT: s_cbranch_scc0 .LBB41_3
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s6, 0xffff0000
; SI-NEXT: v_writelane_b32 v62, s4, 3
@@ -58145,7 +57216,7 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; SI-NEXT: s_lshl_b32 s57, s17, 16
; SI-NEXT: s_and_b32 s58, s16, 0xffff0000
; SI-NEXT: s_lshl_b32 s59, s16, 16
-; SI-NEXT: s_cbranch_execnz .LBB41_4
+; SI-NEXT: s_cbranch_execnz .LBB41_3
; SI-NEXT: .LBB41_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v29, s6, 1.0
; SI-NEXT: v_add_f32_e64 v28, s7, 1.0
@@ -58257,78 +57328,8 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v1
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v0
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: s_branch .LBB41_5
+; SI-NEXT: s_branch .LBB41_4
; SI-NEXT: .LBB41_3:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr99
-; SI-NEXT: ; implicit-def: $sgpr98
-; SI-NEXT: ; implicit-def: $sgpr97
-; SI-NEXT: ; implicit-def: $sgpr96
-; SI-NEXT: ; implicit-def: $sgpr87
-; SI-NEXT: ; implicit-def: $sgpr86
-; SI-NEXT: ; implicit-def: $sgpr85
-; SI-NEXT: ; implicit-def: $sgpr84
-; SI-NEXT: ; implicit-def: $sgpr83
-; SI-NEXT: ; implicit-def: $sgpr82
-; SI-NEXT: ; implicit-def: $sgpr81
-; SI-NEXT: ; implicit-def: $sgpr80
-; SI-NEXT: ; implicit-def: $sgpr71
-; SI-NEXT: ; implicit-def: $sgpr70
-; SI-NEXT: ; implicit-def: $sgpr69
-; SI-NEXT: ; implicit-def: $sgpr68
-; SI-NEXT: ; implicit-def: $sgpr67
-; SI-NEXT: ; implicit-def: $sgpr66
-; SI-NEXT: ; implicit-def: $sgpr65
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
-; SI-NEXT: s_branch .LBB41_2
-; SI-NEXT: .LBB41_4:
; SI-NEXT: v_mov_b32_e32 v7, s63
; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -58410,7 +57411,7 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; SI-NEXT: v_mov_b32_e32 v51, s72
; SI-NEXT: v_mov_b32_e32 v49, s62
; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; SI-NEXT: .LBB41_5: ; %end
+; SI-NEXT: .LBB41_4: ; %end
; SI-NEXT: v_mul_f32_e32 v34, 1.0, v0
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v3
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v0
@@ -58669,9 +57670,9 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB41_3
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB41_4
+; VI-NEXT: s_cbranch_execnz .LBB41_3
; VI-NEXT: .LBB41_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v15, s31, 1.0
; VI-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -58705,10 +57706,8 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; VI-NEXT: v_add_f32_e64 v18, s38, 1.0
; VI-NEXT: v_add_f32_e64 v17, s37, 1.0
; VI-NEXT: v_add_f32_e64 v16, s36, 1.0
-; VI-NEXT: s_branch .LBB41_5
+; VI-NEXT: s_branch .LBB41_4
; VI-NEXT: .LBB41_3:
-; VI-NEXT: s_branch .LBB41_2
-; VI-NEXT: .LBB41_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v16, s36
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -58741,7 +57740,7 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; VI-NEXT: v_mov_b32_e32 v29, s49
; VI-NEXT: v_mov_b32_e32 v30, s50
; VI-NEXT: v_mov_b32_e32 v31, s51
-; VI-NEXT: .LBB41_5: ; %end
+; VI-NEXT: .LBB41_4: ; %end
; VI-NEXT: v_readlane_b32 s51, v32, 9
; VI-NEXT: v_readlane_b32 s50, v32, 8
; VI-NEXT: v_readlane_b32 s49, v32, 7
@@ -58794,9 +57793,9 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB41_4
+; GFX9-NEXT: s_cbranch_execnz .LBB41_3
; GFX9-NEXT: .LBB41_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v15, s31, 1.0
; GFX9-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -58830,10 +57829,8 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; GFX9-NEXT: v_add_f32_e64 v18, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v17, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v16, s36, 1.0
-; GFX9-NEXT: s_branch .LBB41_5
+; GFX9-NEXT: s_branch .LBB41_4
; GFX9-NEXT: .LBB41_3:
-; GFX9-NEXT: s_branch .LBB41_2
-; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v16, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -58866,7 +57863,7 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; GFX9-NEXT: v_mov_b32_e32 v29, s49
; GFX9-NEXT: v_mov_b32_e32 v30, s50
; GFX9-NEXT: v_mov_b32_e32 v31, s51
-; GFX9-NEXT: .LBB41_5: ; %end
+; GFX9-NEXT: .LBB41_4: ; %end
; GFX9-NEXT: v_readlane_b32 s51, v32, 9
; GFX9-NEXT: v_readlane_b32 s50, v32, 8
; GFX9-NEXT: v_readlane_b32 s49, v32, 7
@@ -58920,10 +57917,10 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; GFX11-NEXT: v_readfirstlane_b32 s50, v12
; GFX11-NEXT: v_writelane_b32 v32, s51, 7
; GFX11-NEXT: v_readfirstlane_b32 s51, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
; GFX11-NEXT: .LBB41_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s26, 1.0
@@ -58957,10 +57954,8 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; GFX11-NEXT: v_add_f32_e64 v18, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v17, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v16, s36, 1.0
-; GFX11-NEXT: s_branch .LBB41_5
+; GFX11-NEXT: s_branch .LBB41_4
; GFX11-NEXT: .LBB41_3:
-; GFX11-NEXT: s_branch .LBB41_2
-; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v16, s36 :: v_dual_mov_b32 v17, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -58977,7 +57972,7 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; GFX11-NEXT: v_dual_mov_b32 v26, s46 :: v_dual_mov_b32 v27, s47
; GFX11-NEXT: v_dual_mov_b32 v28, s48 :: v_dual_mov_b32 v29, s49
; GFX11-NEXT: v_dual_mov_b32 v30, s50 :: v_dual_mov_b32 v31, s51
-; GFX11-NEXT: .LBB41_5: ; %end
+; GFX11-NEXT: .LBB41_4: ; %end
; GFX11-NEXT: v_readlane_b32 s51, v32, 7
; GFX11-NEXT: v_readlane_b32 s50, v32, 6
; GFX11-NEXT: v_readlane_b32 s49, v32, 5
@@ -62882,9 +61877,9 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB43_3
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB43_4
+; VI-NEXT: s_cbranch_execnz .LBB43_3
; VI-NEXT: .LBB43_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v32, 0x40c00000
; VI-NEXT: s_lshl_b32 s4, s50, 16
@@ -63478,10 +62473,8 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; VI-NEXT: v_cndmask_b32_e32 v31, v32, v34, vcc
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v33
; VI-NEXT: v_lshrrev_b64 v[31:32], 16, v[31:32]
-; VI-NEXT: s_branch .LBB43_5
+; VI-NEXT: s_branch .LBB43_4
; VI-NEXT: .LBB43_3:
-; VI-NEXT: s_branch .LBB43_2
-; VI-NEXT: .LBB43_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -63514,7 +62507,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB43_5: ; %end
+; VI-NEXT: .LBB43_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v35, 15
; VI-NEXT: v_readlane_b32 s66, v35, 14
; VI-NEXT: v_readlane_b32 s65, v35, 13
@@ -63593,9 +62586,9 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
+; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s51, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v16, 0x40c00000
@@ -64207,10 +63200,8 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX9-NEXT: v_and_b32_sdwa v16, v32, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v33
; GFX9-NEXT: v_lshl_or_b32 v16, v32, 16, v16
-; GFX9-NEXT: s_branch .LBB43_5
+; GFX9-NEXT: s_branch .LBB43_4
; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
-; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -64243,7 +63234,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB43_5: ; %end
+; GFX9-NEXT: .LBB43_4: ; %end
; GFX9-NEXT: v_readlane_b32 s67, v36, 15
; GFX9-NEXT: v_readlane_b32 s66, v36, 14
; GFX9-NEXT: v_readlane_b32 s65, v36, 13
@@ -64323,10 +63314,10 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s67, 15
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s0, s51, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s51, 16
@@ -64949,10 +63940,8 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v34
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v33.l
-; GFX11-TRUE16-NEXT: s_branch .LBB43_5
+; GFX11-TRUE16-NEXT: s_branch .LBB43_4
; GFX11-TRUE16-NEXT: .LBB43_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB43_2
-; GFX11-TRUE16-NEXT: .LBB43_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -64969,7 +63958,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-TRUE16-NEXT: .LBB43_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB43_4: ; %end
; GFX11-TRUE16-NEXT: v_readlane_b32 s67, v40, 15
; GFX11-TRUE16-NEXT: v_readlane_b32 s66, v40, 14
; GFX11-TRUE16-NEXT: v_readlane_b32 s65, v40, 13
@@ -65049,10 +64038,10 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s67, 15
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-FAKE16-NEXT: .LBB43_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_and_b32 s1, s51, 0xffff0000
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s51, 16
@@ -65706,10 +64695,8 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v32, 16, v37
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v36, 16, v33
-; GFX11-FAKE16-NEXT: s_branch .LBB43_5
+; GFX11-FAKE16-NEXT: s_branch .LBB43_4
; GFX11-FAKE16-NEXT: .LBB43_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB43_2
-; GFX11-FAKE16-NEXT: .LBB43_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -65726,7 +64713,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-FAKE16-NEXT: .LBB43_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB43_4: ; %end
; GFX11-FAKE16-NEXT: v_readlane_b32 s67, v40, 15
; GFX11-FAKE16-NEXT: v_readlane_b32 s66, v40, 14
; GFX11-FAKE16-NEXT: v_readlane_b32 s65, v40, 13
@@ -66274,7 +65261,7 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill
-; SI-NEXT: s_cbranch_scc0 .LBB45_3
+; SI-NEXT: s_cbranch_scc0 .LBB45_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s69, s5, 16
; SI-NEXT: s_lshr_b32 s68, s7, 16
@@ -66308,7 +65295,7 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[90:91], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[94:95], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB45_4
+; SI-NEXT: s_cbranch_execnz .LBB45_3
; SI-NEXT: .LBB45_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v31, s5, 1.0
; SI-NEXT: v_add_f32_e64 v30, s4, 1.0
@@ -66377,42 +65364,8 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v1
-; SI-NEXT: s_branch .LBB45_5
+; SI-NEXT: s_branch .LBB45_4
; SI-NEXT: .LBB45_3:
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr65
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr66
-; SI-NEXT: ; implicit-def: $sgpr67
-; SI-NEXT: ; implicit-def: $sgpr68
-; SI-NEXT: ; implicit-def: $sgpr69
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: s_branch .LBB45_2
-; SI-NEXT: .LBB45_4:
; SI-NEXT: v_mov_b32_e32 v33, s30
; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
@@ -66482,7 +65435,7 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v54, s90
; SI-NEXT: v_mov_b32_e32 v39, s92
; SI-NEXT: v_mov_b32_e32 v40, s94
-; SI-NEXT: .LBB45_5: ; %end
+; SI-NEXT: .LBB45_4: ; %end
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32
; SI-NEXT: v_or_b32_e32 v1, v1, v32
@@ -66661,9 +65614,9 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB45_3
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB45_4
+; VI-NEXT: s_cbranch_execnz .LBB45_3
; VI-NEXT: .LBB45_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v15, s31, 1.0
; VI-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -66697,10 +65650,8 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v18, s38, 1.0
; VI-NEXT: v_add_f32_e64 v17, s37, 1.0
; VI-NEXT: v_add_f32_e64 v16, s36, 1.0
-; VI-NEXT: s_branch .LBB45_5
+; VI-NEXT: s_branch .LBB45_4
; VI-NEXT: .LBB45_3:
-; VI-NEXT: s_branch .LBB45_2
-; VI-NEXT: .LBB45_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v16, s36
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -66733,7 +65684,7 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s49
; VI-NEXT: v_mov_b32_e32 v30, s50
; VI-NEXT: v_mov_b32_e32 v31, s51
-; VI-NEXT: .LBB45_5: ; %end
+; VI-NEXT: .LBB45_4: ; %end
; VI-NEXT: v_readlane_b32 s51, v32, 9
; VI-NEXT: v_readlane_b32 s50, v32, 8
; VI-NEXT: v_readlane_b32 s49, v32, 7
@@ -66786,9 +65737,9 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB45_4
+; GFX9-NEXT: s_cbranch_execnz .LBB45_3
; GFX9-NEXT: .LBB45_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v15, s31, 1.0
; GFX9-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -66822,10 +65773,8 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v18, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v17, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v16, s36, 1.0
-; GFX9-NEXT: s_branch .LBB45_5
+; GFX9-NEXT: s_branch .LBB45_4
; GFX9-NEXT: .LBB45_3:
-; GFX9-NEXT: s_branch .LBB45_2
-; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v16, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -66858,7 +65807,7 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s49
; GFX9-NEXT: v_mov_b32_e32 v30, s50
; GFX9-NEXT: v_mov_b32_e32 v31, s51
-; GFX9-NEXT: .LBB45_5: ; %end
+; GFX9-NEXT: .LBB45_4: ; %end
; GFX9-NEXT: v_readlane_b32 s51, v32, 9
; GFX9-NEXT: v_readlane_b32 s50, v32, 8
; GFX9-NEXT: v_readlane_b32 s49, v32, 7
@@ -66912,10 +65861,10 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s50, v12
; GFX11-NEXT: v_writelane_b32 v32, s51, 7
; GFX11-NEXT: v_readfirstlane_b32 s51, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
; GFX11-NEXT: .LBB45_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s26, 1.0
@@ -66949,10 +65898,8 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v18, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v17, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v16, s36, 1.0
-; GFX11-NEXT: s_branch .LBB45_5
+; GFX11-NEXT: s_branch .LBB45_4
; GFX11-NEXT: .LBB45_3:
-; GFX11-NEXT: s_branch .LBB45_2
-; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v16, s36 :: v_dual_mov_b32 v17, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -66969,7 +65916,7 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s46 :: v_dual_mov_b32 v27, s47
; GFX11-NEXT: v_dual_mov_b32 v28, s48 :: v_dual_mov_b32 v29, s49
; GFX11-NEXT: v_dual_mov_b32 v30, s50 :: v_dual_mov_b32 v31, s51
-; GFX11-NEXT: .LBB45_5: ; %end
+; GFX11-NEXT: .LBB45_4: ; %end
; GFX11-NEXT: v_readlane_b32 s51, v32, 7
; GFX11-NEXT: v_readlane_b32 s50, v32, 6
; GFX11-NEXT: v_readlane_b32 s49, v32, 5
@@ -68062,7 +67009,7 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v35, s65, 2
; SI-NEXT: v_writelane_b32 v35, s64, 3
-; SI-NEXT: s_cbranch_scc0 .LBB47_3
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s13, 16
@@ -68160,7 +67107,7 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, vcc_lo, 16
; SI-NEXT: s_or_b32 s67, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB47_4
+; SI-NEXT: s_cbranch_execnz .LBB47_3
; SI-NEXT: .LBB47_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s13
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -68422,11 +67369,8 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v30, v31, v30
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v32
; SI-NEXT: v_or_b32_e32 v31, v33, v31
-; SI-NEXT: s_branch .LBB47_5
+; SI-NEXT: s_branch .LBB47_4
; SI-NEXT: .LBB47_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB47_2
-; SI-NEXT: .LBB47_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -68459,7 +67403,7 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB47_5: ; %end
+; SI-NEXT: .LBB47_4: ; %end
; SI-NEXT: v_readlane_b32 s99, v34, 35
; SI-NEXT: v_readlane_b32 s98, v34, 34
; SI-NEXT: v_readlane_b32 s97, v34, 33
@@ -68559,9 +67503,9 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
+; VI-NEXT: s_cbranch_execnz .LBB47_3
; VI-NEXT: .LBB47_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s51, 16
; VI-NEXT: v_mov_b32_e32 v16, 0x200
@@ -68724,10 +67668,8 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; VI-NEXT: v_add_f16_sdwa v32, v32, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v16, s52, v16
; VI-NEXT: v_or_b32_e32 v16, v16, v32
-; VI-NEXT: s_branch .LBB47_5
+; VI-NEXT: s_branch .LBB47_4
; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
-; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -68760,7 +67702,7 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB47_5: ; %end
+; VI-NEXT: .LBB47_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v33, 15
; VI-NEXT: v_readlane_b32 s66, v33, 14
; VI-NEXT: v_readlane_b32 s65, v33, 13
@@ -68839,9 +67781,9 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
+; GFX9-NEXT: s_cbranch_execnz .LBB47_3
; GFX9-NEXT: .LBB47_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v16, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, s51, v16 op_sel_hi:[1,0]
@@ -68876,10 +67818,8 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v18, s54, v16 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v17, s53, v16 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v16, s52, v16 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB47_5
+; GFX9-NEXT: s_branch .LBB47_4
; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
-; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -68912,7 +67852,7 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB47_5: ; %end
+; GFX9-NEXT: .LBB47_4: ; %end
; GFX9-NEXT: v_readlane_b32 s67, v32, 15
; GFX9-NEXT: v_readlane_b32 s66, v32, 14
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
@@ -68992,10 +67932,10 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-NEXT: v_writelane_b32 v32, s67, 15
; GFX11-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-NEXT: .LBB47_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s51 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s50 op_sel_hi:[0,1]
@@ -69029,10 +67969,8 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v18, 0x200, s54 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s53 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v16, 0x200, s52 op_sel_hi:[0,1]
-; GFX11-NEXT: s_branch .LBB47_5
+; GFX11-NEXT: s_branch .LBB47_4
; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -69049,7 +67987,7 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB47_5: ; %end
+; GFX11-NEXT: .LBB47_4: ; %end
; GFX11-NEXT: v_readlane_b32 s67, v32, 15
; GFX11-NEXT: v_readlane_b32 s66, v32, 14
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
@@ -69597,7 +68535,7 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill
-; SI-NEXT: s_cbranch_scc0 .LBB49_3
+; SI-NEXT: s_cbranch_scc0 .LBB49_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s69, s5, 16
; SI-NEXT: s_lshr_b32 s68, s7, 16
@@ -69631,7 +68569,7 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[90:91], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[94:95], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB49_4
+; SI-NEXT: s_cbranch_execnz .LBB49_3
; SI-NEXT: .LBB49_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v31, s5, 1.0
; SI-NEXT: v_add_f32_e64 v30, s4, 1.0
@@ -69700,42 +68638,8 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v1
-; SI-NEXT: s_branch .LBB49_5
+; SI-NEXT: s_branch .LBB49_4
; SI-NEXT: .LBB49_3:
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr65
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr66
-; SI-NEXT: ; implicit-def: $sgpr67
-; SI-NEXT: ; implicit-def: $sgpr68
-; SI-NEXT: ; implicit-def: $sgpr69
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: s_branch .LBB49_2
-; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v33, s30
; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
@@ -69805,7 +68709,7 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v54, s90
; SI-NEXT: v_mov_b32_e32 v39, s92
; SI-NEXT: v_mov_b32_e32 v40, s94
-; SI-NEXT: .LBB49_5: ; %end
+; SI-NEXT: .LBB49_4: ; %end
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32
; SI-NEXT: v_or_b32_e32 v1, v1, v32
@@ -69984,9 +68888,9 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB49_3
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB49_4
+; VI-NEXT: s_cbranch_execnz .LBB49_3
; VI-NEXT: .LBB49_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v15, s31, 1.0
; VI-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -70020,10 +68924,8 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v18, s38, 1.0
; VI-NEXT: v_add_f32_e64 v17, s37, 1.0
; VI-NEXT: v_add_f32_e64 v16, s36, 1.0
-; VI-NEXT: s_branch .LBB49_5
+; VI-NEXT: s_branch .LBB49_4
; VI-NEXT: .LBB49_3:
-; VI-NEXT: s_branch .LBB49_2
-; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v16, s36
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -70056,7 +68958,7 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s49
; VI-NEXT: v_mov_b32_e32 v30, s50
; VI-NEXT: v_mov_b32_e32 v31, s51
-; VI-NEXT: .LBB49_5: ; %end
+; VI-NEXT: .LBB49_4: ; %end
; VI-NEXT: v_readlane_b32 s51, v32, 9
; VI-NEXT: v_readlane_b32 s50, v32, 8
; VI-NEXT: v_readlane_b32 s49, v32, 7
@@ -70109,9 +69011,9 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
+; GFX9-NEXT: s_cbranch_execnz .LBB49_3
; GFX9-NEXT: .LBB49_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v15, s31, 1.0
; GFX9-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -70145,10 +69047,8 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v18, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v17, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v16, s36, 1.0
-; GFX9-NEXT: s_branch .LBB49_5
+; GFX9-NEXT: s_branch .LBB49_4
; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: s_branch .LBB49_2
-; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v16, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -70181,7 +69081,7 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s49
; GFX9-NEXT: v_mov_b32_e32 v30, s50
; GFX9-NEXT: v_mov_b32_e32 v31, s51
-; GFX9-NEXT: .LBB49_5: ; %end
+; GFX9-NEXT: .LBB49_4: ; %end
; GFX9-NEXT: v_readlane_b32 s51, v32, 9
; GFX9-NEXT: v_readlane_b32 s50, v32, 8
; GFX9-NEXT: v_readlane_b32 s49, v32, 7
@@ -70235,10 +69135,10 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s50, v12
; GFX11-NEXT: v_writelane_b32 v32, s51, 7
; GFX11-NEXT: v_readfirstlane_b32 s51, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s26, 1.0
@@ -70272,10 +69172,8 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v18, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v17, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v16, s36, 1.0
-; GFX11-NEXT: s_branch .LBB49_5
+; GFX11-NEXT: s_branch .LBB49_4
; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: s_branch .LBB49_2
-; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v16, s36 :: v_dual_mov_b32 v17, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -70292,7 +69190,7 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s46 :: v_dual_mov_b32 v27, s47
; GFX11-NEXT: v_dual_mov_b32 v28, s48 :: v_dual_mov_b32 v29, s49
; GFX11-NEXT: v_dual_mov_b32 v30, s50 :: v_dual_mov_b32 v31, s51
-; GFX11-NEXT: .LBB49_5: ; %end
+; GFX11-NEXT: .LBB49_4: ; %end
; GFX11-NEXT: v_readlane_b32 s51, v32, 7
; GFX11-NEXT: v_readlane_b32 s50, v32, 6
; GFX11-NEXT: v_readlane_b32 s49, v32, 5
@@ -71589,7 +70487,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB51_4
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB51_3
; VI-NEXT: .LBB51_2: ; %cmp.true
@@ -71787,8 +70685,6 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v30, s7
; VI-NEXT: v_mov_b32_e32 v31, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_4:
-; VI-NEXT: s_branch .LBB51_2
;
; GFX9-LABEL: bitcast_v64i16_to_v32f32_scalar:
; GFX9: ; %bb.0:
@@ -71846,9 +70742,9 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_4
+; GFX9-NEXT: s_cbranch_execnz .LBB51_3
; GFX9-NEXT: .LBB51_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, s51, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, s50, 3 op_sel_hi:[1,0]
@@ -71882,10 +70778,8 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v18, s54, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v17, s53, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v16, s52, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB51_5
+; GFX9-NEXT: s_branch .LBB51_4
; GFX9-NEXT: .LBB51_3:
-; GFX9-NEXT: s_branch .LBB51_2
-; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -71918,7 +70812,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB51_5: ; %end
+; GFX9-NEXT: .LBB51_4: ; %end
; GFX9-NEXT: v_readlane_b32 s67, v32, 15
; GFX9-NEXT: v_readlane_b32 s66, v32, 14
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
@@ -71998,10 +70892,10 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-NEXT: v_writelane_b32 v32, s67, 15
; GFX11-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-NEXT: .LBB51_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s51, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s50, 3 op_sel_hi:[1,0]
@@ -72035,10 +70929,8 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v18, s54, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v17, s53, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v16, s52, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: s_branch .LBB51_5
+; GFX11-NEXT: s_branch .LBB51_4
; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
-; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -72055,7 +70947,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB51_5: ; %end
+; GFX11-NEXT: .LBB51_4: ; %end
; GFX11-NEXT: v_readlane_b32 s67, v32, 15
; GFX11-NEXT: v_readlane_b32 s66, v32, 14
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
@@ -72345,7 +71237,7 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s46, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s47, v0
-; SI-NEXT: s_cbranch_scc0 .LBB53_4
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB53_3
; SI-NEXT: .LBB53_2: ; %cmp.true
@@ -72415,8 +71307,6 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v30, s7
; SI-NEXT: v_mov_b32_e32 v31, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB53_4:
-; SI-NEXT: s_branch .LBB53_2
;
; VI-LABEL: bitcast_v16i64_to_v16f64_scalar:
; VI: ; %bb.0:
@@ -72441,7 +71331,7 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB53_4
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB53_3
; VI-NEXT: .LBB53_2: ; %cmp.true
@@ -72511,8 +71401,6 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v30, s7
; VI-NEXT: v_mov_b32_e32 v31, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_4:
-; VI-NEXT: s_branch .LBB53_2
;
; GFX9-LABEL: bitcast_v16i64_to_v16f64_scalar:
; GFX9: ; %bb.0:
@@ -72537,7 +71425,7 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s46, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s47, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB53_3
; GFX9-NEXT: .LBB53_2: ; %cmp.true
@@ -72607,8 +71495,6 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v30, s7
; GFX9-NEXT: v_mov_b32_e32 v31, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB53_4:
-; GFX9-NEXT: s_branch .LBB53_2
;
; GFX11-LABEL: bitcast_v16i64_to_v16f64_scalar:
; GFX11: ; %bb.0:
@@ -72630,7 +71516,7 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s41, v0
; GFX11-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-NEXT: s_mov_b32 s42, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
@@ -72685,8 +71571,6 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v28, s7 :: v_dual_mov_b32 v29, s6
; GFX11-NEXT: v_dual_mov_b32 v30, s5 :: v_dual_mov_b32 v31, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -72917,9 +71801,9 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB55_4
+; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -72937,10 +71821,8 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; SI-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
; SI-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
; SI-NEXT: v_add_f64 v[30:31], s[66:67], 1.0
-; SI-NEXT: s_branch .LBB55_5
+; SI-NEXT: s_branch .LBB55_4
; SI-NEXT: .LBB55_3:
-; SI-NEXT: s_branch .LBB55_2
-; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -72973,7 +71855,7 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB55_5: ; %end
+; SI-NEXT: .LBB55_4: ; %end
; SI-NEXT: v_readlane_b32 s67, v32, 15
; SI-NEXT: v_readlane_b32 s66, v32, 14
; SI-NEXT: v_readlane_b32 s65, v32, 13
@@ -73052,9 +71934,9 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_4
+; VI-NEXT: s_cbranch_execnz .LBB55_3
; VI-NEXT: .LBB55_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -73072,10 +71954,8 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; VI-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
; VI-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
; VI-NEXT: v_add_f64 v[30:31], s[66:67], 1.0
-; VI-NEXT: s_branch .LBB55_5
+; VI-NEXT: s_branch .LBB55_4
; VI-NEXT: .LBB55_3:
-; VI-NEXT: s_branch .LBB55_2
-; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -73108,7 +71988,7 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB55_5: ; %end
+; VI-NEXT: .LBB55_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v32, 15
; VI-NEXT: v_readlane_b32 s66, v32, 14
; VI-NEXT: v_readlane_b32 s65, v32, 13
@@ -73187,9 +72067,9 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
+; GFX9-NEXT: s_cbranch_execnz .LBB55_3
; GFX9-NEXT: .LBB55_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -73207,10 +72087,8 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
; GFX9-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
; GFX9-NEXT: v_add_f64 v[30:31], s[66:67], 1.0
-; GFX9-NEXT: s_branch .LBB55_5
+; GFX9-NEXT: s_branch .LBB55_4
; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
-; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -73243,7 +72121,7 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB55_5: ; %end
+; GFX9-NEXT: .LBB55_4: ; %end
; GFX9-NEXT: v_readlane_b32 s67, v32, 15
; GFX9-NEXT: v_readlane_b32 s66, v32, 14
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
@@ -73323,10 +72201,10 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-NEXT: v_writelane_b32 v32, s67, 15
; GFX11-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-NEXT: .LBB55_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -73344,10 +72222,8 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
; GFX11-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
; GFX11-NEXT: v_add_f64 v[30:31], s[66:67], 1.0
-; GFX11-NEXT: s_branch .LBB55_5
+; GFX11-NEXT: s_branch .LBB55_4
; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
-; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -73364,7 +72240,7 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB55_5: ; %end
+; GFX11-NEXT: .LBB55_4: ; %end
; GFX11-NEXT: v_readlane_b32 s67, v32, 15
; GFX11-NEXT: v_readlane_b32 s66, v32, 14
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
@@ -79501,7 +78377,7 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s44, v1
; GFX9-NEXT: v_writelane_b32 v29, s99, 35
; GFX9-NEXT: ; implicit-def: $vgpr30 : SGPR spill to VGPR lane
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s46, s5, 24
; GFX9-NEXT: v_writelane_b32 v30, s46, 0
@@ -80147,154 +79023,6 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB57_4:
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr81
-; GFX9-NEXT: ; implicit-def: $sgpr53
-; GFX9-NEXT: ; implicit-def: $sgpr83
-; GFX9-NEXT: ; implicit-def: $sgpr50
-; GFX9-NEXT: ; implicit-def: $sgpr52
-; GFX9-NEXT: ; implicit-def: $sgpr69
-; GFX9-NEXT: ; implicit-def: $sgpr54
-; GFX9-NEXT: ; implicit-def: $sgpr68
-; GFX9-NEXT: ; implicit-def: $sgpr38
-; GFX9-NEXT: ; implicit-def: $sgpr48
-; GFX9-NEXT: ; implicit-def: $sgpr70
-; GFX9-NEXT: ; implicit-def: $sgpr39
-; GFX9-NEXT: ; implicit-def: $sgpr80
-; GFX9-NEXT: ; implicit-def: $sgpr99
-; GFX9-NEXT: ; implicit-def: $sgpr49
-; GFX9-NEXT: ; implicit-def: $sgpr64
-; GFX9-NEXT: ; implicit-def: $sgpr97
-; GFX9-NEXT: ; implicit-def: $sgpr66
-; GFX9-NEXT: ; implicit-def: $sgpr87
-; GFX9-NEXT: ; implicit-def: $sgpr98
-; GFX9-NEXT: ; implicit-def: $sgpr55
-; GFX9-NEXT: ; implicit-def: $sgpr86
-; GFX9-NEXT: ; implicit-def: $sgpr65
-; GFX9-NEXT: ; implicit-def: $sgpr84
-; GFX9-NEXT: ; implicit-def: $sgpr82
-; GFX9-NEXT: ; implicit-def: $sgpr67
-; GFX9-NEXT: ; implicit-def: $sgpr51
-; GFX9-NEXT: ; implicit-def: $sgpr71
-; GFX9-NEXT: ; implicit-def: $sgpr96
-; GFX9-NEXT: ; implicit-def: $sgpr85
-; GFX9-NEXT: ; implicit-def: $sgpr36
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB57_2
;
; GFX11-LABEL: bitcast_v16i64_to_v128i8_scalar:
; GFX11: ; %bb.0:
@@ -80366,7 +79094,7 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX11-NEXT: v_writelane_b32 v24, s85, 29
; GFX11-NEXT: v_writelane_b32 v24, s86, 30
; GFX11-NEXT: v_writelane_b32 v24, s87, 31
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s42, s5, 24
; GFX11-NEXT: s_lshr_b32 vcc_hi, s27, 24
@@ -80941,144 +79669,6 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX11-NEXT: s_mov_b32 exec_lo, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB57_4:
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr104
-; GFX11-NEXT: ; implicit-def: $sgpr103
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $sgpr102
-; GFX11-NEXT: ; implicit-def: $sgpr83
-; GFX11-NEXT: ; implicit-def: $sgpr81
-; GFX11-NEXT: ; implicit-def: $sgpr101
-; GFX11-NEXT: ; implicit-def: $sgpr100
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr99
-; GFX11-NEXT: ; implicit-def: $sgpr71
-; GFX11-NEXT: ; implicit-def: $sgpr70
-; GFX11-NEXT: ; implicit-def: $sgpr98
-; GFX11-NEXT: ; implicit-def: $sgpr97
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr96
-; GFX11-NEXT: ; implicit-def: $sgpr68
-; GFX11-NEXT: ; implicit-def: $sgpr67
-; GFX11-NEXT: ; implicit-def: $sgpr87
-; GFX11-NEXT: ; implicit-def: $sgpr86
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr85
-; GFX11-NEXT: ; implicit-def: $sgpr66
-; GFX11-NEXT: ; implicit-def: $sgpr65
-; GFX11-NEXT: ; implicit-def: $sgpr82
-; GFX11-NEXT: ; implicit-def: $sgpr84
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr69
-; GFX11-NEXT: ; implicit-def: $sgpr64
-; GFX11-NEXT: ; implicit-def: $sgpr55
-; GFX11-NEXT: ; implicit-def: $sgpr53
-; GFX11-NEXT: ; implicit-def: $sgpr80
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr51
-; GFX11-NEXT: ; implicit-def: $sgpr52
-; GFX11-NEXT: ; implicit-def: $sgpr50
-; GFX11-NEXT: ; implicit-def: $sgpr49
-; GFX11-NEXT: ; implicit-def: $sgpr54
-; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr48
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr37
-; GFX11-NEXT: ; implicit-def: $sgpr35
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: s_branch .LBB57_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -89920,7 +88510,7 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v31
; GFX11-TRUE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v10, 0xc0c0004
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
@@ -90365,9 +88955,6 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:324
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB59_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB59_2
;
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v16i64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -90493,7 +89080,7 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v31
; GFX11-FAKE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v10, 0xc0c0004
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(1)
@@ -90938,9 +89525,6 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32 offset:324
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB59_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB59_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -91831,7 +90415,7 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s9, v0
; SI-NEXT: ; implicit-def: $vgpr34 : SGPR spill to VGPR lane
-; SI-NEXT: s_cbranch_scc0 .LBB61_4
+; SI-NEXT: s_cbranch_scc0 .LBB61_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s70, 0xffff0000
; SI-NEXT: s_waitcnt expcnt(0)
@@ -92181,76 +90765,6 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB61_4:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr68
-; SI-NEXT: ; implicit-def: $sgpr69
-; SI-NEXT: ; implicit-def: $sgpr66
-; SI-NEXT: ; implicit-def: $sgpr67
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr65
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
-; SI-NEXT: s_branch .LBB61_2
;
; VI-LABEL: bitcast_v16i64_to_v64bf16_scalar:
; VI: ; %bb.0:
@@ -92275,7 +90789,7 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB61_4
+; VI-NEXT: s_cbranch_scc0 .LBB61_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB61_3
; VI-NEXT: .LBB61_2: ; %cmp.true
@@ -92345,8 +90859,6 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; VI-NEXT: v_mov_b32_e32 v30, s7
; VI-NEXT: v_mov_b32_e32 v31, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB61_4:
-; VI-NEXT: s_branch .LBB61_2
;
; GFX9-LABEL: bitcast_v16i64_to_v64bf16_scalar:
; GFX9: ; %bb.0:
@@ -92371,7 +90883,7 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; GFX9-NEXT: v_readfirstlane_b32 s46, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s47, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB61_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB61_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB61_3
; GFX9-NEXT: .LBB61_2: ; %cmp.true
@@ -92441,8 +90953,6 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v30, s7
; GFX9-NEXT: v_mov_b32_e32 v31, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB61_4:
-; GFX9-NEXT: s_branch .LBB61_2
;
; GFX11-LABEL: bitcast_v16i64_to_v64bf16_scalar:
; GFX11: ; %bb.0:
@@ -92464,7 +90974,7 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; GFX11-NEXT: v_readfirstlane_b32 s41, v0
; GFX11-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-NEXT: s_mov_b32 s42, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB61_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB61_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-NEXT: s_cbranch_vccnz .LBB61_3
@@ -92519,8 +91029,6 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v28, s7 :: v_dual_mov_b32 v29, s6
; GFX11-NEXT: v_dual_mov_b32 v30, s5 :: v_dual_mov_b32 v31, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB61_4:
-; GFX11-NEXT: s_branch .LBB61_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -96412,9 +94920,9 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB63_3
+; VI-NEXT: s_cbranch_scc0 .LBB63_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB63_4
+; VI-NEXT: s_cbranch_execnz .LBB63_3
; VI-NEXT: .LBB63_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v32, 0x40c00000
; VI-NEXT: s_lshl_b32 s4, s50, 16
@@ -97008,10 +95516,8 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_cndmask_b32_e32 v31, v32, v34, vcc
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v33
; VI-NEXT: v_lshrrev_b64 v[31:32], 16, v[31:32]
-; VI-NEXT: s_branch .LBB63_5
+; VI-NEXT: s_branch .LBB63_4
; VI-NEXT: .LBB63_3:
-; VI-NEXT: s_branch .LBB63_2
-; VI-NEXT: .LBB63_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -97044,7 +95550,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB63_5: ; %end
+; VI-NEXT: .LBB63_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v35, 15
; VI-NEXT: v_readlane_b32 s66, v35, 14
; VI-NEXT: v_readlane_b32 s65, v35, 13
@@ -97123,9 +95629,9 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB63_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB63_4
+; GFX9-NEXT: s_cbranch_execnz .LBB63_3
; GFX9-NEXT: .LBB63_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s51, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v16, 0x40c00000
@@ -97737,10 +96243,8 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_and_b32_sdwa v16, v32, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v33
; GFX9-NEXT: v_lshl_or_b32 v16, v32, 16, v16
-; GFX9-NEXT: s_branch .LBB63_5
+; GFX9-NEXT: s_branch .LBB63_4
; GFX9-NEXT: .LBB63_3:
-; GFX9-NEXT: s_branch .LBB63_2
-; GFX9-NEXT: .LBB63_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -97773,7 +96277,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB63_5: ; %end
+; GFX9-NEXT: .LBB63_4: ; %end
; GFX9-NEXT: v_readlane_b32 s67, v36, 15
; GFX9-NEXT: v_readlane_b32 s66, v36, 14
; GFX9-NEXT: v_readlane_b32 s65, v36, 13
@@ -97853,10 +96357,10 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s67, 15
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB63_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB63_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB63_3
; GFX11-TRUE16-NEXT: .LBB63_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s0, s51, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s51, 16
@@ -98479,10 +96983,8 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v34
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v33.l
-; GFX11-TRUE16-NEXT: s_branch .LBB63_5
+; GFX11-TRUE16-NEXT: s_branch .LBB63_4
; GFX11-TRUE16-NEXT: .LBB63_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB63_2
-; GFX11-TRUE16-NEXT: .LBB63_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -98499,7 +97001,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-TRUE16-NEXT: .LBB63_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB63_4: ; %end
; GFX11-TRUE16-NEXT: v_readlane_b32 s67, v40, 15
; GFX11-TRUE16-NEXT: v_readlane_b32 s66, v40, 14
; GFX11-TRUE16-NEXT: v_readlane_b32 s65, v40, 13
@@ -98579,10 +97081,10 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s67, 15
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB63_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB63_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB63_3
; GFX11-FAKE16-NEXT: .LBB63_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_and_b32 s1, s51, 0xffff0000
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s51, 16
@@ -99236,10 +97738,8 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v32, 16, v37
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v36, 16, v33
-; GFX11-FAKE16-NEXT: s_branch .LBB63_5
+; GFX11-FAKE16-NEXT: s_branch .LBB63_4
; GFX11-FAKE16-NEXT: .LBB63_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB63_2
-; GFX11-FAKE16-NEXT: .LBB63_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -99256,7 +97756,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-FAKE16-NEXT: .LBB63_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB63_4: ; %end
; GFX11-FAKE16-NEXT: v_readlane_b32 s67, v40, 15
; GFX11-FAKE16-NEXT: v_readlane_b32 s66, v40, 14
; GFX11-FAKE16-NEXT: v_readlane_b32 s65, v40, 13
@@ -99813,7 +98313,7 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; SI-NEXT: s_cmp_lg_u32 s44, 0
; SI-NEXT: v_readfirstlane_b32 s44, v0
; SI-NEXT: v_writelane_b32 v32, s69, 21
-; SI-NEXT: s_cbranch_scc0 .LBB65_4
+; SI-NEXT: s_cbranch_scc0 .LBB65_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s38, s5, 16
; SI-NEXT: s_lshr_b32 s39, s7, 16
@@ -100069,40 +98569,6 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB65_4:
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr69
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr68
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr67
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr66
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr65
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: s_branch .LBB65_2
;
; VI-LABEL: bitcast_v16i64_to_v64f16_scalar:
; VI: ; %bb.0:
@@ -100127,7 +98593,7 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB65_4
+; VI-NEXT: s_cbranch_scc0 .LBB65_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB65_3
; VI-NEXT: .LBB65_2: ; %cmp.true
@@ -100197,8 +98663,6 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v30, s7
; VI-NEXT: v_mov_b32_e32 v31, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB65_4:
-; VI-NEXT: s_branch .LBB65_2
;
; GFX9-LABEL: bitcast_v16i64_to_v64f16_scalar:
; GFX9: ; %bb.0:
@@ -100223,7 +98687,7 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s46, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s47, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB65_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB65_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB65_3
; GFX9-NEXT: .LBB65_2: ; %cmp.true
@@ -100293,8 +98757,6 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v30, s7
; GFX9-NEXT: v_mov_b32_e32 v31, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB65_4:
-; GFX9-NEXT: s_branch .LBB65_2
;
; GFX11-LABEL: bitcast_v16i64_to_v64f16_scalar:
; GFX11: ; %bb.0:
@@ -100316,7 +98778,7 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s41, v0
; GFX11-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-NEXT: s_mov_b32 s42, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB65_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB65_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-NEXT: s_cbranch_vccnz .LBB65_3
@@ -100371,8 +98833,6 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v28, s7 :: v_dual_mov_b32 v29, s6
; GFX11-NEXT: v_dual_mov_b32 v30, s5 :: v_dual_mov_b32 v31, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB65_4:
-; GFX11-NEXT: s_branch .LBB65_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -101452,7 +99912,7 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v35, s65, 2
; SI-NEXT: v_writelane_b32 v35, s64, 3
-; SI-NEXT: s_cbranch_scc0 .LBB67_3
+; SI-NEXT: s_cbranch_scc0 .LBB67_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s13, 16
@@ -101550,7 +100010,7 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, vcc_lo, 16
; SI-NEXT: s_or_b32 s67, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB67_4
+; SI-NEXT: s_cbranch_execnz .LBB67_3
; SI-NEXT: .LBB67_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s13
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -101812,11 +100272,8 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v30, v31, v30
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v32
; SI-NEXT: v_or_b32_e32 v31, v33, v31
-; SI-NEXT: s_branch .LBB67_5
+; SI-NEXT: s_branch .LBB67_4
; SI-NEXT: .LBB67_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB67_2
-; SI-NEXT: .LBB67_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -101849,7 +100306,7 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB67_5: ; %end
+; SI-NEXT: .LBB67_4: ; %end
; SI-NEXT: v_readlane_b32 s99, v34, 35
; SI-NEXT: v_readlane_b32 s98, v34, 34
; SI-NEXT: v_readlane_b32 s97, v34, 33
@@ -101949,9 +100406,9 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB67_3
+; VI-NEXT: s_cbranch_scc0 .LBB67_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB67_4
+; VI-NEXT: s_cbranch_execnz .LBB67_3
; VI-NEXT: .LBB67_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s51, 16
; VI-NEXT: v_mov_b32_e32 v16, 0x200
@@ -102114,10 +100571,8 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_add_f16_sdwa v32, v32, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v16, s52, v16
; VI-NEXT: v_or_b32_e32 v16, v16, v32
-; VI-NEXT: s_branch .LBB67_5
+; VI-NEXT: s_branch .LBB67_4
; VI-NEXT: .LBB67_3:
-; VI-NEXT: s_branch .LBB67_2
-; VI-NEXT: .LBB67_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -102150,7 +100605,7 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB67_5: ; %end
+; VI-NEXT: .LBB67_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v33, 15
; VI-NEXT: v_readlane_b32 s66, v33, 14
; VI-NEXT: v_readlane_b32 s65, v33, 13
@@ -102229,9 +100684,9 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB67_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB67_4
+; GFX9-NEXT: s_cbranch_execnz .LBB67_3
; GFX9-NEXT: .LBB67_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v16, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, s51, v16 op_sel_hi:[1,0]
@@ -102266,10 +100721,8 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v18, s54, v16 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v17, s53, v16 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v16, s52, v16 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB67_5
+; GFX9-NEXT: s_branch .LBB67_4
; GFX9-NEXT: .LBB67_3:
-; GFX9-NEXT: s_branch .LBB67_2
-; GFX9-NEXT: .LBB67_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -102302,7 +100755,7 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB67_5: ; %end
+; GFX9-NEXT: .LBB67_4: ; %end
; GFX9-NEXT: v_readlane_b32 s67, v32, 15
; GFX9-NEXT: v_readlane_b32 s66, v32, 14
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
@@ -102382,10 +100835,10 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-NEXT: v_writelane_b32 v32, s67, 15
; GFX11-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB67_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB67_3
; GFX11-NEXT: .LBB67_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s51 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s50 op_sel_hi:[0,1]
@@ -102419,10 +100872,8 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v18, 0x200, s54 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s53 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v16, 0x200, s52 op_sel_hi:[0,1]
-; GFX11-NEXT: s_branch .LBB67_5
+; GFX11-NEXT: s_branch .LBB67_4
; GFX11-NEXT: .LBB67_3:
-; GFX11-NEXT: s_branch .LBB67_2
-; GFX11-NEXT: .LBB67_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -102439,7 +100890,7 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB67_5: ; %end
+; GFX11-NEXT: .LBB67_4: ; %end
; GFX11-NEXT: v_readlane_b32 s67, v32, 15
; GFX11-NEXT: v_readlane_b32 s66, v32, 14
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
@@ -102996,7 +101447,7 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3
; SI-NEXT: s_cmp_lg_u32 s44, 0
; SI-NEXT: v_readfirstlane_b32 s44, v0
; SI-NEXT: v_writelane_b32 v32, s69, 21
-; SI-NEXT: s_cbranch_scc0 .LBB69_4
+; SI-NEXT: s_cbranch_scc0 .LBB69_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s38, s5, 16
; SI-NEXT: s_lshr_b32 s39, s7, 16
@@ -103252,40 +101703,6 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB69_4:
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr69
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr68
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr67
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr66
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr65
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: s_branch .LBB69_2
;
; VI-LABEL: bitcast_v16i64_to_v64i16_scalar:
; VI: ; %bb.0:
@@ -103310,7 +101727,7 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB69_4
+; VI-NEXT: s_cbranch_scc0 .LBB69_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB69_3
; VI-NEXT: .LBB69_2: ; %cmp.true
@@ -103380,8 +101797,6 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v30, s7
; VI-NEXT: v_mov_b32_e32 v31, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB69_4:
-; VI-NEXT: s_branch .LBB69_2
;
; GFX9-LABEL: bitcast_v16i64_to_v64i16_scalar:
; GFX9: ; %bb.0:
@@ -103406,7 +101821,7 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s46, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s47, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB69_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB69_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB69_3
; GFX9-NEXT: .LBB69_2: ; %cmp.true
@@ -103476,8 +101891,6 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v30, s7
; GFX9-NEXT: v_mov_b32_e32 v31, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB69_4:
-; GFX9-NEXT: s_branch .LBB69_2
;
; GFX11-LABEL: bitcast_v16i64_to_v64i16_scalar:
; GFX11: ; %bb.0:
@@ -103499,7 +101912,7 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s41, v0
; GFX11-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-NEXT: s_mov_b32 s42, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB69_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB69_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-NEXT: s_cbranch_vccnz .LBB69_3
@@ -103554,8 +101967,6 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v28, s7 :: v_dual_mov_b32 v29, s6
; GFX11-NEXT: v_dual_mov_b32 v30, s5 :: v_dual_mov_b32 v31, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB69_4:
-; GFX11-NEXT: s_branch .LBB69_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -104839,7 +103250,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB71_4
+; VI-NEXT: s_cbranch_scc0 .LBB71_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB71_3
; VI-NEXT: .LBB71_2: ; %cmp.true
@@ -105037,8 +103448,6 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v30, s7
; VI-NEXT: v_mov_b32_e32 v31, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB71_4:
-; VI-NEXT: s_branch .LBB71_2
;
; GFX9-LABEL: bitcast_v64i16_to_v16i64_scalar:
; GFX9: ; %bb.0:
@@ -105096,9 +103505,9 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB71_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB71_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB71_4
+; GFX9-NEXT: s_cbranch_execnz .LBB71_3
; GFX9-NEXT: .LBB71_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, s51, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, s50, 3 op_sel_hi:[1,0]
@@ -105132,10 +103541,8 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v18, s54, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v17, s53, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v16, s52, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB71_5
+; GFX9-NEXT: s_branch .LBB71_4
; GFX9-NEXT: .LBB71_3:
-; GFX9-NEXT: s_branch .LBB71_2
-; GFX9-NEXT: .LBB71_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -105168,7 +103575,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB71_5: ; %end
+; GFX9-NEXT: .LBB71_4: ; %end
; GFX9-NEXT: v_readlane_b32 s67, v32, 15
; GFX9-NEXT: v_readlane_b32 s66, v32, 14
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
@@ -105248,10 +103655,10 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-NEXT: v_writelane_b32 v32, s67, 15
; GFX11-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB71_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB71_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB71_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB71_3
; GFX11-NEXT: .LBB71_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s51, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s50, 3 op_sel_hi:[1,0]
@@ -105285,10 +103692,8 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v18, s54, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v17, s53, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v16, s52, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: s_branch .LBB71_5
+; GFX11-NEXT: s_branch .LBB71_4
; GFX11-NEXT: .LBB71_3:
-; GFX11-NEXT: s_branch .LBB71_2
-; GFX11-NEXT: .LBB71_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -105305,7 +103710,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB71_5: ; %end
+; GFX11-NEXT: .LBB71_4: ; %end
; GFX11-NEXT: v_readlane_b32 s67, v32, 15
; GFX11-NEXT: v_readlane_b32 s66, v32, 14
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
@@ -110670,7 +109075,7 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane
-; VI-NEXT: s_cbranch_scc0 .LBB73_3
+; VI-NEXT: s_cbranch_scc0 .LBB73_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s46, s5, 24
; VI-NEXT: v_writelane_b32 v62, s46, 10
@@ -110826,7 +109231,7 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; VI-NEXT: s_lshr_b64 s[36:37], s[20:21], 24
; VI-NEXT: s_lshr_b64 s[38:39], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[48:49], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB73_4
+; VI-NEXT: s_cbranch_execnz .LBB73_3
; VI-NEXT: .LBB73_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[13:14], s[4:5], 1.0
; VI-NEXT: v_add_f64 v[1:2], s[6:7], 1.0
@@ -111023,164 +109428,8 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v36, 8, v32
; VI-NEXT: v_lshrrev_b32_e32 v45, 16, v31
; VI-NEXT: v_lshrrev_b32_e32 v46, 8, v31
-; VI-NEXT: s_branch .LBB73_5
+; VI-NEXT: s_branch .LBB73_4
; VI-NEXT: .LBB73_3:
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr70
-; VI-NEXT: ; implicit-def: $sgpr71
-; VI-NEXT: ; implicit-def: $sgpr68
-; VI-NEXT: ; implicit-def: $sgpr69
-; VI-NEXT: ; implicit-def: $sgpr66
-; VI-NEXT: ; implicit-def: $sgpr67
-; VI-NEXT: ; implicit-def: $sgpr64
-; VI-NEXT: ; implicit-def: $sgpr65
-; VI-NEXT: ; implicit-def: $sgpr54
-; VI-NEXT: ; implicit-def: $sgpr55
-; VI-NEXT: ; implicit-def: $sgpr53
-; VI-NEXT: ; implicit-def: $sgpr87
-; VI-NEXT: ; implicit-def: $sgpr86
-; VI-NEXT: ; implicit-def: $sgpr52
-; VI-NEXT: ; implicit-def: $sgpr50
-; VI-NEXT: ; implicit-def: $sgpr51
-; VI-NEXT: ; implicit-def: $sgpr84
-; VI-NEXT: ; implicit-def: $sgpr85
-; VI-NEXT: ; implicit-def: $sgpr82
-; VI-NEXT: ; implicit-def: $sgpr83
-; VI-NEXT: ; implicit-def: $sgpr80
-; VI-NEXT: ; implicit-def: $sgpr81
-; VI-NEXT: ; implicit-def: $sgpr48
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB73_2
-; VI-NEXT: .LBB73_4:
; VI-NEXT: v_mov_b32_e32 v42, s48
; VI-NEXT: v_mov_b32_e32 v41, s38
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
@@ -111450,7 +109699,7 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v41, s46
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; VI-NEXT: .LBB73_5: ; %end
+; VI-NEXT: .LBB73_4: ; %end
; VI-NEXT: s_mov_b32 s4, 0xc0c0004
; VI-NEXT: v_perm_b32 v41, v45, v42, s4
; VI-NEXT: v_perm_b32 v31, v31, v46, s4
@@ -111902,7 +110151,7 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane
-; GFX9-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB73_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s46, s5, 24
; GFX9-NEXT: v_writelane_b32 v62, s46, 2
@@ -112050,7 +110299,7 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX9-NEXT: s_lshr_b64 s[30:31], s[20:21], 24
; GFX9-NEXT: s_lshr_b64 s[34:35], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[36:37], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB73_4
+; GFX9-NEXT: s_cbranch_execnz .LBB73_3
; GFX9-NEXT: .LBB73_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[11:12], s[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[1:2], s[6:7], 1.0
@@ -112266,156 +110515,8 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v20, 8, v49
; GFX9-NEXT: v_lshrrev_b32_e32 v59, 16, v48
; GFX9-NEXT: v_lshrrev_b32_e32 v60, 8, v48
-; GFX9-NEXT: s_branch .LBB73_5
+; GFX9-NEXT: s_branch .LBB73_4
; GFX9-NEXT: .LBB73_3:
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr80
-; GFX9-NEXT: ; implicit-def: $sgpr81
-; GFX9-NEXT: ; implicit-def: $sgpr70
-; GFX9-NEXT: ; implicit-def: $sgpr71
-; GFX9-NEXT: ; implicit-def: $sgpr68
-; GFX9-NEXT: ; implicit-def: $sgpr69
-; GFX9-NEXT: ; implicit-def: $sgpr66
-; GFX9-NEXT: ; implicit-def: $sgpr67
-; GFX9-NEXT: ; implicit-def: $sgpr64
-; GFX9-NEXT: ; implicit-def: $sgpr65
-; GFX9-NEXT: ; implicit-def: $sgpr54
-; GFX9-NEXT: ; implicit-def: $sgpr55
-; GFX9-NEXT: ; implicit-def: $sgpr52
-; GFX9-NEXT: ; implicit-def: $sgpr53
-; GFX9-NEXT: ; implicit-def: $sgpr50
-; GFX9-NEXT: ; implicit-def: $sgpr51
-; GFX9-NEXT: ; implicit-def: $sgpr48
-; GFX9-NEXT: ; implicit-def: $sgpr49
-; GFX9-NEXT: ; implicit-def: $sgpr38
-; GFX9-NEXT: ; implicit-def: $sgpr39
-; GFX9-NEXT: ; implicit-def: $sgpr98
-; GFX9-NEXT: ; implicit-def: $sgpr99
-; GFX9-NEXT: ; implicit-def: $sgpr96
-; GFX9-NEXT: ; implicit-def: $sgpr97
-; GFX9-NEXT: ; implicit-def: $sgpr86
-; GFX9-NEXT: ; implicit-def: $sgpr87
-; GFX9-NEXT: ; implicit-def: $sgpr84
-; GFX9-NEXT: ; implicit-def: $sgpr85
-; GFX9-NEXT: ; implicit-def: $sgpr82
-; GFX9-NEXT: ; implicit-def: $sgpr83
-; GFX9-NEXT: ; implicit-def: $sgpr36
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB73_2
-; GFX9-NEXT: .LBB73_4:
; GFX9-NEXT: v_mov_b32_e32 v15, s66
; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v15, s65
@@ -112697,7 +110798,7 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v27, s67
; GFX9-NEXT: v_mov_b32_e32 v20, s4
; GFX9-NEXT: v_mov_b32_e32 v51, s55
-; GFX9-NEXT: .LBB73_5: ; %end
+; GFX9-NEXT: .LBB73_4: ; %end
; GFX9-NEXT: s_mov_b32 s4, 0xc0c0004
; GFX9-NEXT: v_perm_b32 v39, v39, v15, s4
; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload
@@ -113143,7 +111244,7 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_writelane_b32 v74, s85, 29
; GFX11-NEXT: v_writelane_b32 v74, s86, 30
; GFX11-NEXT: v_writelane_b32 v74, s87, 31
-; GFX11-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB73_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s42, s11, 16
; GFX11-NEXT: s_lshr_b32 s48, s41, 24
@@ -113293,7 +111394,7 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_writelane_b32 v77, s42, 7
; GFX11-NEXT: s_lshr_b64 s[42:43], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_lo
-; GFX11-NEXT: s_cbranch_vccnz .LBB73_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB73_3
; GFX11-NEXT: .LBB73_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[21:22], s[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[11:12], s[4:5], 1.0
@@ -113407,146 +111508,8 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v73, 8, v49
; GFX11-NEXT: v_lshrrev_b32_e32 v41, 16, v48
; GFX11-NEXT: v_lshrrev_b32_e32 v42, 8, v48
-; GFX11-NEXT: s_branch .LBB73_5
+; GFX11-NEXT: s_branch .LBB73_4
; GFX11-NEXT: .LBB73_3:
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr103
-; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr101
-; GFX11-NEXT: ; implicit-def: $sgpr102
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr99
-; GFX11-NEXT: ; implicit-def: $sgpr100
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr97
-; GFX11-NEXT: ; implicit-def: $sgpr98
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr87
-; GFX11-NEXT: ; implicit-def: $sgpr96
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr85
-; GFX11-NEXT: ; implicit-def: $sgpr86
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr83
-; GFX11-NEXT: ; implicit-def: $sgpr84
-; GFX11-NEXT: ; implicit-def: $sgpr81
-; GFX11-NEXT: ; implicit-def: $sgpr82
-; GFX11-NEXT: ; implicit-def: $sgpr71
-; GFX11-NEXT: ; implicit-def: $sgpr80
-; GFX11-NEXT: ; implicit-def: $sgpr69
-; GFX11-NEXT: ; implicit-def: $sgpr70
-; GFX11-NEXT: ; implicit-def: $sgpr67
-; GFX11-NEXT: ; implicit-def: $sgpr68
-; GFX11-NEXT: ; implicit-def: $sgpr65
-; GFX11-NEXT: ; implicit-def: $sgpr66
-; GFX11-NEXT: ; implicit-def: $sgpr55
-; GFX11-NEXT: ; implicit-def: $sgpr64
-; GFX11-NEXT: ; implicit-def: $sgpr53
-; GFX11-NEXT: ; implicit-def: $sgpr54
-; GFX11-NEXT: ; implicit-def: $sgpr104
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $sgpr51
-; GFX11-NEXT: ; implicit-def: $sgpr52
-; GFX11-NEXT: ; implicit-def: $sgpr35
-; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr37
-; GFX11-NEXT: ; implicit-def: $sgpr49
-; GFX11-NEXT: ; implicit-def: $sgpr50
-; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr48
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: s_branch .LBB73_2
-; GFX11-NEXT: .LBB73_4:
; GFX11-NEXT: v_dual_mov_b32 v48, s0 :: v_dual_mov_b32 v49, s1
; GFX11-NEXT: v_readlane_b32 s0, v76, 0
; GFX11-NEXT: v_mov_b32_e32 v83, s36
@@ -113693,7 +111656,7 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_mov_b32_e32 v72, s0
; GFX11-NEXT: v_readlane_b32 s0, v77, 7
; GFX11-NEXT: v_mov_b32_e32 v73, s0
-; GFX11-NEXT: .LBB73_5: ; %end
+; GFX11-NEXT: .LBB73_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_perm_b32 v69, v41, v69, 0xc0c0004
; GFX11-NEXT: v_perm_b32 v48, v48, v42, 0xc0c0004
@@ -122743,7 +120706,7 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v31
; GFX11-TRUE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB75_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB75_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v10, 0xc0c0004
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
@@ -123188,9 +121151,6 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:324
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB75_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB75_2
;
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v16f64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -123316,7 +121276,7 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v31
; GFX11-FAKE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB75_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB75_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v10, 0xc0c0004
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(1)
@@ -123761,9 +121721,6 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32 offset:324
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB75_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB75_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -124576,7 +122533,7 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane
-; SI-NEXT: s_cbranch_scc0 .LBB77_3
+; SI-NEXT: s_cbranch_scc0 .LBB77_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s46, s17, 0xffff0000
; SI-NEXT: v_writelane_b32 v62, s46, 0
@@ -124646,7 +122603,7 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; SI-NEXT: s_and_b32 s61, s18, 0xffff0000
; SI-NEXT: s_lshl_b32 s60, s18, 16
; SI-NEXT: v_writelane_b32 v62, s46, 3
-; SI-NEXT: s_cbranch_execnz .LBB77_4
+; SI-NEXT: s_cbranch_execnz .LBB77_3
; SI-NEXT: .LBB77_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[22:23], s[4:5], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[22:23], 1.0
@@ -124745,78 +122702,8 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; SI-NEXT: v_mov_b32_e32 v38, v34
; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v37
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v37
-; SI-NEXT: s_branch .LBB77_5
+; SI-NEXT: s_branch .LBB77_4
; SI-NEXT: .LBB77_3:
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; kill: killed $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr65
-; SI-NEXT: ; implicit-def: $sgpr66
-; SI-NEXT: ; implicit-def: $sgpr67
-; SI-NEXT: ; implicit-def: $sgpr68
-; SI-NEXT: ; implicit-def: $sgpr69
-; SI-NEXT: ; implicit-def: $sgpr70
-; SI-NEXT: ; implicit-def: $sgpr71
-; SI-NEXT: ; implicit-def: $sgpr80
-; SI-NEXT: ; implicit-def: $sgpr81
-; SI-NEXT: ; implicit-def: $sgpr82
-; SI-NEXT: ; implicit-def: $sgpr83
-; SI-NEXT: ; implicit-def: $sgpr84
-; SI-NEXT: ; implicit-def: $sgpr85
-; SI-NEXT: ; implicit-def: $sgpr86
-; SI-NEXT: ; implicit-def: $sgpr87
-; SI-NEXT: ; implicit-def: $sgpr96
-; SI-NEXT: ; implicit-def: $sgpr97
-; SI-NEXT: ; implicit-def: $sgpr98
-; SI-NEXT: ; implicit-def: $sgpr99
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; kill: killed $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; kill: killed $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; kill: killed $sgpr46
-; SI-NEXT: s_branch .LBB77_2
-; SI-NEXT: .LBB77_4:
; SI-NEXT: v_mov_b32_e32 v0, s59
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -124901,7 +122788,7 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; SI-NEXT: v_mov_b32_e32 v5, s61
; SI-NEXT: v_mov_b32_e32 v4, s60
; SI-NEXT: v_mov_b32_e32 v3, s4
-; SI-NEXT: .LBB77_5: ; %end
+; SI-NEXT: .LBB77_4: ; %end
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v0
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v2
@@ -125162,9 +123049,9 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB77_3
+; VI-NEXT: s_cbranch_scc0 .LBB77_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB77_4
+; VI-NEXT: s_cbranch_execnz .LBB77_3
; VI-NEXT: .LBB77_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -125182,10 +123069,8 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; VI-NEXT: v_add_f64 v[20:21], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[18:19], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[16:17], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB77_5
+; VI-NEXT: s_branch .LBB77_4
; VI-NEXT: .LBB77_3:
-; VI-NEXT: s_branch .LBB77_2
-; VI-NEXT: .LBB77_4:
; VI-NEXT: v_mov_b32_e32 v16, s36
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v17, s37
@@ -125218,7 +123103,7 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB77_5: ; %end
+; VI-NEXT: .LBB77_4: ; %end
; VI-NEXT: v_readlane_b32 s51, v32, 9
; VI-NEXT: v_readlane_b32 s50, v32, 8
; VI-NEXT: v_readlane_b32 s49, v32, 7
@@ -125271,9 +123156,9 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB77_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB77_4
+; GFX9-NEXT: s_cbranch_execnz .LBB77_3
; GFX9-NEXT: .LBB77_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -125291,10 +123176,8 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; GFX9-NEXT: v_add_f64 v[20:21], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[18:19], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[16:17], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB77_5
+; GFX9-NEXT: s_branch .LBB77_4
; GFX9-NEXT: .LBB77_3:
-; GFX9-NEXT: s_branch .LBB77_2
-; GFX9-NEXT: .LBB77_4:
; GFX9-NEXT: v_mov_b32_e32 v16, s36
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v17, s37
@@ -125327,7 +123210,7 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB77_5: ; %end
+; GFX9-NEXT: .LBB77_4: ; %end
; GFX9-NEXT: v_readlane_b32 s51, v32, 9
; GFX9-NEXT: v_readlane_b32 s50, v32, 8
; GFX9-NEXT: v_readlane_b32 s49, v32, 7
@@ -125381,10 +123264,10 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; GFX11-NEXT: v_readfirstlane_b32 s50, v12
; GFX11-NEXT: v_writelane_b32 v32, s51, 7
; GFX11-NEXT: v_readfirstlane_b32 s51, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB77_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB77_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB77_3
; GFX11-NEXT: .LBB77_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
@@ -125402,10 +123285,8 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; GFX11-NEXT: v_add_f64 v[20:21], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[18:19], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[16:17], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB77_5
+; GFX11-NEXT: s_branch .LBB77_4
; GFX11-NEXT: .LBB77_3:
-; GFX11-NEXT: s_branch .LBB77_2
-; GFX11-NEXT: .LBB77_4:
; GFX11-NEXT: v_dual_mov_b32 v16, s36 :: v_dual_mov_b32 v17, s37
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v18, s38 :: v_dual_mov_b32 v19, s39
@@ -125422,7 +123303,7 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: .LBB77_5: ; %end
+; GFX11-NEXT: .LBB77_4: ; %end
; GFX11-NEXT: v_readlane_b32 s51, v32, 7
; GFX11-NEXT: v_readlane_b32 s50, v32, 6
; GFX11-NEXT: v_readlane_b32 s49, v32, 5
@@ -129327,9 +127208,9 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB79_3
+; VI-NEXT: s_cbranch_scc0 .LBB79_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB79_4
+; VI-NEXT: s_cbranch_execnz .LBB79_3
; VI-NEXT: .LBB79_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v32, 0x40c00000
; VI-NEXT: s_lshl_b32 s4, s50, 16
@@ -129923,10 +127804,8 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; VI-NEXT: v_cndmask_b32_e32 v31, v32, v34, vcc
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v33
; VI-NEXT: v_lshrrev_b64 v[31:32], 16, v[31:32]
-; VI-NEXT: s_branch .LBB79_5
+; VI-NEXT: s_branch .LBB79_4
; VI-NEXT: .LBB79_3:
-; VI-NEXT: s_branch .LBB79_2
-; VI-NEXT: .LBB79_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -129959,7 +127838,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB79_5: ; %end
+; VI-NEXT: .LBB79_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v35, 15
; VI-NEXT: v_readlane_b32 s66, v35, 14
; VI-NEXT: v_readlane_b32 s65, v35, 13
@@ -130038,9 +127917,9 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB79_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB79_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB79_4
+; GFX9-NEXT: s_cbranch_execnz .LBB79_3
; GFX9-NEXT: .LBB79_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s51, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v16, 0x40c00000
@@ -130652,10 +128531,8 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX9-NEXT: v_and_b32_sdwa v16, v32, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v33
; GFX9-NEXT: v_lshl_or_b32 v16, v32, 16, v16
-; GFX9-NEXT: s_branch .LBB79_5
+; GFX9-NEXT: s_branch .LBB79_4
; GFX9-NEXT: .LBB79_3:
-; GFX9-NEXT: s_branch .LBB79_2
-; GFX9-NEXT: .LBB79_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -130688,7 +128565,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB79_5: ; %end
+; GFX9-NEXT: .LBB79_4: ; %end
; GFX9-NEXT: v_readlane_b32 s67, v36, 15
; GFX9-NEXT: v_readlane_b32 s66, v36, 14
; GFX9-NEXT: v_readlane_b32 s65, v36, 13
@@ -130768,10 +128645,10 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s67, 15
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB79_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB79_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB79_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB79_3
; GFX11-TRUE16-NEXT: .LBB79_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s0, s51, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s51, 16
@@ -131394,10 +129271,8 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v34
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v33.l
-; GFX11-TRUE16-NEXT: s_branch .LBB79_5
+; GFX11-TRUE16-NEXT: s_branch .LBB79_4
; GFX11-TRUE16-NEXT: .LBB79_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB79_2
-; GFX11-TRUE16-NEXT: .LBB79_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -131414,7 +129289,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-TRUE16-NEXT: .LBB79_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB79_4: ; %end
; GFX11-TRUE16-NEXT: v_readlane_b32 s67, v40, 15
; GFX11-TRUE16-NEXT: v_readlane_b32 s66, v40, 14
; GFX11-TRUE16-NEXT: v_readlane_b32 s65, v40, 13
@@ -131494,10 +129369,10 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s67, 15
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB79_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB79_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB79_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB79_3
; GFX11-FAKE16-NEXT: .LBB79_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_and_b32 s1, s51, 0xffff0000
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s51, 16
@@ -132151,10 +130026,8 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v32, 16, v37
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v36, 16, v33
-; GFX11-FAKE16-NEXT: s_branch .LBB79_5
+; GFX11-FAKE16-NEXT: s_branch .LBB79_4
; GFX11-FAKE16-NEXT: .LBB79_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB79_2
-; GFX11-FAKE16-NEXT: .LBB79_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -132171,7 +130044,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-FAKE16-NEXT: .LBB79_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB79_4: ; %end
; GFX11-FAKE16-NEXT: v_readlane_b32 s67, v40, 15
; GFX11-FAKE16-NEXT: v_readlane_b32 s66, v40, 14
; GFX11-FAKE16-NEXT: v_readlane_b32 s65, v40, 13
@@ -132672,7 +130545,7 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill
-; SI-NEXT: s_cbranch_scc0 .LBB81_3
+; SI-NEXT: s_cbranch_scc0 .LBB81_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s69, s45, 16
; SI-NEXT: s_lshr_b32 s68, s43, 16
@@ -132706,7 +130579,7 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; SI-NEXT: s_lshr_b64 s[30:31], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[34:35], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB81_4
+; SI-NEXT: s_cbranch_execnz .LBB81_3
; SI-NEXT: .LBB81_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[28:29], s[42:43], 1.0
; SI-NEXT: v_add_f64 v[26:27], s[40:41], 1.0
@@ -132760,42 +130633,8 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v1
; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v31
-; SI-NEXT: s_branch .LBB81_5
+; SI-NEXT: s_branch .LBB81_4
; SI-NEXT: .LBB81_3:
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr65
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr66
-; SI-NEXT: ; implicit-def: $sgpr67
-; SI-NEXT: ; implicit-def: $sgpr68
-; SI-NEXT: ; implicit-def: $sgpr69
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: s_branch .LBB81_2
-; SI-NEXT: .LBB81_4:
; SI-NEXT: v_mov_b32_e32 v32, s68
; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -132864,7 +130703,7 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; SI-NEXT: v_mov_b32_e32 v33, s60
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v32, s46
-; SI-NEXT: .LBB81_5: ; %end
+; SI-NEXT: .LBB81_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v39
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; SI-NEXT: v_or_b32_e32 v2, v2, v39
@@ -133044,9 +130883,9 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB81_3
+; VI-NEXT: s_cbranch_scc0 .LBB81_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB81_4
+; VI-NEXT: s_cbranch_execnz .LBB81_3
; VI-NEXT: .LBB81_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -133064,10 +130903,8 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; VI-NEXT: v_add_f64 v[20:21], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[18:19], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[16:17], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB81_5
+; VI-NEXT: s_branch .LBB81_4
; VI-NEXT: .LBB81_3:
-; VI-NEXT: s_branch .LBB81_2
-; VI-NEXT: .LBB81_4:
; VI-NEXT: v_mov_b32_e32 v16, s36
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v17, s37
@@ -133100,7 +130937,7 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB81_5: ; %end
+; VI-NEXT: .LBB81_4: ; %end
; VI-NEXT: v_readlane_b32 s51, v32, 9
; VI-NEXT: v_readlane_b32 s50, v32, 8
; VI-NEXT: v_readlane_b32 s49, v32, 7
@@ -133153,9 +130990,9 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB81_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB81_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB81_4
+; GFX9-NEXT: s_cbranch_execnz .LBB81_3
; GFX9-NEXT: .LBB81_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -133173,10 +131010,8 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; GFX9-NEXT: v_add_f64 v[20:21], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[18:19], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[16:17], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB81_5
+; GFX9-NEXT: s_branch .LBB81_4
; GFX9-NEXT: .LBB81_3:
-; GFX9-NEXT: s_branch .LBB81_2
-; GFX9-NEXT: .LBB81_4:
; GFX9-NEXT: v_mov_b32_e32 v16, s36
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v17, s37
@@ -133209,7 +131044,7 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB81_5: ; %end
+; GFX9-NEXT: .LBB81_4: ; %end
; GFX9-NEXT: v_readlane_b32 s51, v32, 9
; GFX9-NEXT: v_readlane_b32 s50, v32, 8
; GFX9-NEXT: v_readlane_b32 s49, v32, 7
@@ -133263,10 +131098,10 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; GFX11-NEXT: v_readfirstlane_b32 s50, v12
; GFX11-NEXT: v_writelane_b32 v32, s51, 7
; GFX11-NEXT: v_readfirstlane_b32 s51, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB81_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB81_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB81_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB81_3
; GFX11-NEXT: .LBB81_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
@@ -133284,10 +131119,8 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; GFX11-NEXT: v_add_f64 v[20:21], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[18:19], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[16:17], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB81_5
+; GFX11-NEXT: s_branch .LBB81_4
; GFX11-NEXT: .LBB81_3:
-; GFX11-NEXT: s_branch .LBB81_2
-; GFX11-NEXT: .LBB81_4:
; GFX11-NEXT: v_dual_mov_b32 v16, s36 :: v_dual_mov_b32 v17, s37
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v18, s38 :: v_dual_mov_b32 v19, s39
@@ -133304,7 +131137,7 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: .LBB81_5: ; %end
+; GFX11-NEXT: .LBB81_4: ; %end
; GFX11-NEXT: v_readlane_b32 s51, v32, 7
; GFX11-NEXT: v_readlane_b32 s50, v32, 6
; GFX11-NEXT: v_readlane_b32 s49, v32, 5
@@ -134397,7 +132230,7 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v35, s65, 2
; SI-NEXT: v_writelane_b32 v35, s64, 3
-; SI-NEXT: s_cbranch_scc0 .LBB83_3
+; SI-NEXT: s_cbranch_scc0 .LBB83_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s13, 16
@@ -134495,7 +132328,7 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, vcc_lo, 16
; SI-NEXT: s_or_b32 s67, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB83_4
+; SI-NEXT: s_cbranch_execnz .LBB83_3
; SI-NEXT: .LBB83_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s13
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -134757,11 +132590,8 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: v_or_b32_e32 v30, v31, v30
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v32
; SI-NEXT: v_or_b32_e32 v31, v33, v31
-; SI-NEXT: s_branch .LBB83_5
+; SI-NEXT: s_branch .LBB83_4
; SI-NEXT: .LBB83_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB83_2
-; SI-NEXT: .LBB83_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -134794,7 +132624,7 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB83_5: ; %end
+; SI-NEXT: .LBB83_4: ; %end
; SI-NEXT: v_readlane_b32 s99, v34, 35
; SI-NEXT: v_readlane_b32 s98, v34, 34
; SI-NEXT: v_readlane_b32 s97, v34, 33
@@ -134894,9 +132724,9 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB83_3
+; VI-NEXT: s_cbranch_scc0 .LBB83_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB83_4
+; VI-NEXT: s_cbranch_execnz .LBB83_3
; VI-NEXT: .LBB83_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s51, 16
; VI-NEXT: v_mov_b32_e32 v16, 0x200
@@ -135059,10 +132889,8 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; VI-NEXT: v_add_f16_sdwa v32, v32, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v16, s52, v16
; VI-NEXT: v_or_b32_e32 v16, v16, v32
-; VI-NEXT: s_branch .LBB83_5
+; VI-NEXT: s_branch .LBB83_4
; VI-NEXT: .LBB83_3:
-; VI-NEXT: s_branch .LBB83_2
-; VI-NEXT: .LBB83_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -135095,7 +132923,7 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB83_5: ; %end
+; VI-NEXT: .LBB83_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v33, 15
; VI-NEXT: v_readlane_b32 s66, v33, 14
; VI-NEXT: v_readlane_b32 s65, v33, 13
@@ -135174,9 +133002,9 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB83_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB83_4
+; GFX9-NEXT: s_cbranch_execnz .LBB83_3
; GFX9-NEXT: .LBB83_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v16, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, s51, v16 op_sel_hi:[1,0]
@@ -135211,10 +133039,8 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; GFX9-NEXT: v_pk_add_f16 v18, s54, v16 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v17, s53, v16 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v16, s52, v16 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB83_5
+; GFX9-NEXT: s_branch .LBB83_4
; GFX9-NEXT: .LBB83_3:
-; GFX9-NEXT: s_branch .LBB83_2
-; GFX9-NEXT: .LBB83_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -135247,7 +133073,7 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB83_5: ; %end
+; GFX9-NEXT: .LBB83_4: ; %end
; GFX9-NEXT: v_readlane_b32 s67, v32, 15
; GFX9-NEXT: v_readlane_b32 s66, v32, 14
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
@@ -135327,10 +133153,10 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; GFX11-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-NEXT: v_writelane_b32 v32, s67, 15
; GFX11-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB83_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB83_3
; GFX11-NEXT: .LBB83_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s51 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s50 op_sel_hi:[0,1]
@@ -135364,10 +133190,8 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; GFX11-NEXT: v_pk_add_f16 v18, 0x200, s54 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s53 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v16, 0x200, s52 op_sel_hi:[0,1]
-; GFX11-NEXT: s_branch .LBB83_5
+; GFX11-NEXT: s_branch .LBB83_4
; GFX11-NEXT: .LBB83_3:
-; GFX11-NEXT: s_branch .LBB83_2
-; GFX11-NEXT: .LBB83_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -135384,7 +133208,7 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB83_5: ; %end
+; GFX11-NEXT: .LBB83_4: ; %end
; GFX11-NEXT: v_readlane_b32 s67, v32, 15
; GFX11-NEXT: v_readlane_b32 s66, v32, 14
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
@@ -135885,7 +133709,7 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill
-; SI-NEXT: s_cbranch_scc0 .LBB85_3
+; SI-NEXT: s_cbranch_scc0 .LBB85_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s69, s45, 16
; SI-NEXT: s_lshr_b32 s68, s43, 16
@@ -135919,7 +133743,7 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; SI-NEXT: s_lshr_b64 s[30:31], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[34:35], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB85_4
+; SI-NEXT: s_cbranch_execnz .LBB85_3
; SI-NEXT: .LBB85_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[28:29], s[42:43], 1.0
; SI-NEXT: v_add_f64 v[26:27], s[40:41], 1.0
@@ -135973,42 +133797,8 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v1
; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v31
-; SI-NEXT: s_branch .LBB85_5
+; SI-NEXT: s_branch .LBB85_4
; SI-NEXT: .LBB85_3:
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr65
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr66
-; SI-NEXT: ; implicit-def: $sgpr67
-; SI-NEXT: ; implicit-def: $sgpr68
-; SI-NEXT: ; implicit-def: $sgpr69
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: s_branch .LBB85_2
-; SI-NEXT: .LBB85_4:
; SI-NEXT: v_mov_b32_e32 v32, s68
; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -136077,7 +133867,7 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v33, s60
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v32, s46
-; SI-NEXT: .LBB85_5: ; %end
+; SI-NEXT: .LBB85_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v39
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; SI-NEXT: v_or_b32_e32 v2, v2, v39
@@ -136257,9 +134047,9 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB85_3
+; VI-NEXT: s_cbranch_scc0 .LBB85_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB85_4
+; VI-NEXT: s_cbranch_execnz .LBB85_3
; VI-NEXT: .LBB85_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -136277,10 +134067,8 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; VI-NEXT: v_add_f64 v[20:21], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[18:19], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[16:17], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB85_5
+; VI-NEXT: s_branch .LBB85_4
; VI-NEXT: .LBB85_3:
-; VI-NEXT: s_branch .LBB85_2
-; VI-NEXT: .LBB85_4:
; VI-NEXT: v_mov_b32_e32 v16, s36
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v17, s37
@@ -136313,7 +134101,7 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB85_5: ; %end
+; VI-NEXT: .LBB85_4: ; %end
; VI-NEXT: v_readlane_b32 s51, v32, 9
; VI-NEXT: v_readlane_b32 s50, v32, 8
; VI-NEXT: v_readlane_b32 s49, v32, 7
@@ -136366,9 +134154,9 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB85_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB85_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB85_4
+; GFX9-NEXT: s_cbranch_execnz .LBB85_3
; GFX9-NEXT: .LBB85_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -136386,10 +134174,8 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[20:21], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[18:19], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[16:17], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB85_5
+; GFX9-NEXT: s_branch .LBB85_4
; GFX9-NEXT: .LBB85_3:
-; GFX9-NEXT: s_branch .LBB85_2
-; GFX9-NEXT: .LBB85_4:
; GFX9-NEXT: v_mov_b32_e32 v16, s36
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v17, s37
@@ -136422,7 +134208,7 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB85_5: ; %end
+; GFX9-NEXT: .LBB85_4: ; %end
; GFX9-NEXT: v_readlane_b32 s51, v32, 9
; GFX9-NEXT: v_readlane_b32 s50, v32, 8
; GFX9-NEXT: v_readlane_b32 s49, v32, 7
@@ -136476,10 +134262,10 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s50, v12
; GFX11-NEXT: v_writelane_b32 v32, s51, 7
; GFX11-NEXT: v_readfirstlane_b32 s51, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB85_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB85_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB85_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB85_3
; GFX11-NEXT: .LBB85_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
@@ -136497,10 +134283,8 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[20:21], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[18:19], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[16:17], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB85_5
+; GFX11-NEXT: s_branch .LBB85_4
; GFX11-NEXT: .LBB85_3:
-; GFX11-NEXT: s_branch .LBB85_2
-; GFX11-NEXT: .LBB85_4:
; GFX11-NEXT: v_dual_mov_b32 v16, s36 :: v_dual_mov_b32 v17, s37
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v18, s38 :: v_dual_mov_b32 v19, s39
@@ -136517,7 +134301,7 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: .LBB85_5: ; %end
+; GFX11-NEXT: .LBB85_4: ; %end
; GFX11-NEXT: v_readlane_b32 s51, v32, 7
; GFX11-NEXT: v_readlane_b32 s50, v32, 6
; GFX11-NEXT: v_readlane_b32 s49, v32, 5
@@ -137814,7 +135598,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB87_4
+; VI-NEXT: s_cbranch_scc0 .LBB87_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB87_3
; VI-NEXT: .LBB87_2: ; %cmp.true
@@ -138012,8 +135796,6 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v30, s7
; VI-NEXT: v_mov_b32_e32 v31, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB87_4:
-; VI-NEXT: s_branch .LBB87_2
;
; GFX9-LABEL: bitcast_v64i16_to_v16f64_scalar:
; GFX9: ; %bb.0:
@@ -138071,9 +135853,9 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB87_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB87_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB87_4
+; GFX9-NEXT: s_cbranch_execnz .LBB87_3
; GFX9-NEXT: .LBB87_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, s51, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, s50, 3 op_sel_hi:[1,0]
@@ -138107,10 +135889,8 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v18, s54, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v17, s53, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v16, s52, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB87_5
+; GFX9-NEXT: s_branch .LBB87_4
; GFX9-NEXT: .LBB87_3:
-; GFX9-NEXT: s_branch .LBB87_2
-; GFX9-NEXT: .LBB87_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -138143,7 +135923,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB87_5: ; %end
+; GFX9-NEXT: .LBB87_4: ; %end
; GFX9-NEXT: v_readlane_b32 s67, v32, 15
; GFX9-NEXT: v_readlane_b32 s66, v32, 14
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
@@ -138223,10 +136003,10 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s66, v12
; GFX11-NEXT: v_writelane_b32 v32, s67, 15
; GFX11-NEXT: v_readfirstlane_b32 s67, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB87_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB87_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB87_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB87_3
; GFX11-NEXT: .LBB87_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s51, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s50, 3 op_sel_hi:[1,0]
@@ -138260,10 +136040,8 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v18, s54, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v17, s53, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v16, s52, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: s_branch .LBB87_5
+; GFX11-NEXT: s_branch .LBB87_4
; GFX11-NEXT: .LBB87_3:
-; GFX11-NEXT: s_branch .LBB87_2
-; GFX11-NEXT: .LBB87_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -138280,7 +136058,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB87_5: ; %end
+; GFX11-NEXT: .LBB87_4: ; %end
; GFX11-NEXT: v_readlane_b32 s67, v32, 15
; GFX11-NEXT: v_readlane_b32 s66, v32, 14
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
@@ -148055,7 +145833,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v31
; GFX11-TRUE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB89_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB89_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, 0xc0c0004 :: v_dual_lshlrev_b32 v13, 8, v86
@@ -148451,10 +146229,6 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:328
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB89_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB89_2
;
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v64bf16_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -148578,7 +146352,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v31
; GFX11-FAKE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB89_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB89_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v8, 0xc0c0004
; GFX11-FAKE16-NEXT: s_and_b32 s76, s74, 0xff
@@ -149037,10 +146811,6 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32 offset:320 ; 4-byte Folded Reload
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB89_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB89_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -158305,7 +156075,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane
-; VI-NEXT: s_cbranch_scc0 .LBB91_3
+; VI-NEXT: s_cbranch_scc0 .LBB91_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s46, s5, 24
; VI-NEXT: v_writelane_b32 v62, s46, 2
@@ -158461,7 +156231,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; VI-NEXT: s_lshr_b64 s[36:37], s[40:41], 24
; VI-NEXT: s_lshr_b64 s[38:39], s[42:43], 24
; VI-NEXT: s_lshr_b64 s[48:49], s[44:45], 24
-; VI-NEXT: s_cbranch_execnz .LBB91_4
+; VI-NEXT: s_cbranch_execnz .LBB91_3
; VI-NEXT: .LBB91_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s46, s45, 16
; VI-NEXT: v_mov_b32_e32 v2, 0x40c00000
@@ -159281,164 +157051,8 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_lshrrev_b32_e32 v57, 8, v36
; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v53
; VI-NEXT: v_lshrrev_b32_e32 v53, 8, v53
-; VI-NEXT: s_branch .LBB91_5
+; VI-NEXT: s_branch .LBB91_4
; VI-NEXT: .LBB91_3:
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr80
-; VI-NEXT: ; implicit-def: $sgpr81
-; VI-NEXT: ; implicit-def: $sgpr84
-; VI-NEXT: ; implicit-def: $sgpr85
-; VI-NEXT: ; implicit-def: $sgpr82
-; VI-NEXT: ; implicit-def: $sgpr83
-; VI-NEXT: ; implicit-def: $sgpr50
-; VI-NEXT: ; implicit-def: $sgpr86
-; VI-NEXT: ; implicit-def: $sgpr87
-; VI-NEXT: ; implicit-def: $sgpr51
-; VI-NEXT: ; implicit-def: $sgpr52
-; VI-NEXT: ; implicit-def: $sgpr53
-; VI-NEXT: ; implicit-def: $sgpr65
-; VI-NEXT: ; implicit-def: $sgpr66
-; VI-NEXT: ; implicit-def: $sgpr54
-; VI-NEXT: ; implicit-def: $sgpr55
-; VI-NEXT: ; implicit-def: $sgpr64
-; VI-NEXT: ; implicit-def: $sgpr70
-; VI-NEXT: ; implicit-def: $sgpr71
-; VI-NEXT: ; implicit-def: $sgpr67
-; VI-NEXT: ; implicit-def: $sgpr68
-; VI-NEXT: ; implicit-def: $sgpr69
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr48
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB91_2
-; VI-NEXT: .LBB91_4:
; VI-NEXT: v_readlane_b32 s47, v62, 0
; VI-NEXT: v_mov_b32_e32 v1, s47
; VI-NEXT: v_readlane_b32 s47, v62, 1
@@ -159714,7 +157328,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_mov_b32_e32 v43, s67
; VI-NEXT: v_mov_b32_e32 v7, v20
; VI-NEXT: v_mov_b32_e32 v20, v10
-; VI-NEXT: .LBB91_5: ; %end
+; VI-NEXT: .LBB91_4: ; %end
; VI-NEXT: s_mov_b32 s4, 0xc0c0004
; VI-NEXT: v_perm_b32 v6, v33, v6, s4
; VI-NEXT: v_perm_b32 v26, v26, v58, s4
@@ -160167,7 +157781,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane
-; GFX9-NEXT: s_cbranch_scc0 .LBB91_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB91_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s46, s5, 24
; GFX9-NEXT: v_writelane_b32 v62, s46, 10
@@ -160315,7 +157929,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_lshr_b64 s[30:31], s[40:41], 24
; GFX9-NEXT: s_lshr_b64 s[34:35], s[42:43], 24
; GFX9-NEXT: s_lshr_b64 s[36:37], s[44:45], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB91_4
+; GFX9-NEXT: s_cbranch_execnz .LBB91_3
; GFX9-NEXT: .LBB91_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s46, s45, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v8, 0x40c00000
@@ -161117,156 +158731,8 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 8, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v22, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: s_branch .LBB91_5
+; GFX9-NEXT: s_branch .LBB91_4
; GFX9-NEXT: .LBB91_3:
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr66
-; GFX9-NEXT: ; implicit-def: $sgpr81
-; GFX9-NEXT: ; implicit-def: $sgpr83
-; GFX9-NEXT: ; implicit-def: $sgpr84
-; GFX9-NEXT: ; implicit-def: $sgpr80
-; GFX9-NEXT: ; implicit-def: $sgpr87
-; GFX9-NEXT: ; implicit-def: $sgpr96
-; GFX9-NEXT: ; implicit-def: $sgpr85
-; GFX9-NEXT: ; implicit-def: $sgpr71
-; GFX9-NEXT: ; implicit-def: $sgpr82
-; GFX9-NEXT: ; implicit-def: $sgpr99
-; GFX9-NEXT: ; implicit-def: $sgpr38
-; GFX9-NEXT: ; implicit-def: $sgpr97
-; GFX9-NEXT: ; implicit-def: $sgpr70
-; GFX9-NEXT: ; implicit-def: $sgpr86
-; GFX9-NEXT: ; implicit-def: $sgpr49
-; GFX9-NEXT: ; implicit-def: $sgpr50
-; GFX9-NEXT: ; implicit-def: $sgpr39
-; GFX9-NEXT: ; implicit-def: $sgpr69
-; GFX9-NEXT: ; implicit-def: $sgpr98
-; GFX9-NEXT: ; implicit-def: $sgpr53
-; GFX9-NEXT: ; implicit-def: $sgpr54
-; GFX9-NEXT: ; implicit-def: $sgpr51
-; GFX9-NEXT: ; implicit-def: $sgpr68
-; GFX9-NEXT: ; implicit-def: $sgpr48
-; GFX9-NEXT: ; implicit-def: $sgpr64
-; GFX9-NEXT: ; implicit-def: $sgpr65
-; GFX9-NEXT: ; implicit-def: $sgpr55
-; GFX9-NEXT: ; implicit-def: $sgpr67
-; GFX9-NEXT: ; implicit-def: $sgpr52
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr36
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB91_2
-; GFX9-NEXT: .LBB91_4:
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v1, s28
@@ -161524,7 +158990,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v50, s76
; GFX9-NEXT: v_mov_b32_e32 v26, s34
; GFX9-NEXT: v_mov_b32_e32 v32, s36
-; GFX9-NEXT: .LBB91_5: ; %end
+; GFX9-NEXT: .LBB91_4: ; %end
; GFX9-NEXT: s_mov_b32 s4, 0xc0c0004
; GFX9-NEXT: v_perm_b32 v18, v18, v25, s4
; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
@@ -161973,7 +159439,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: v_writelane_b32 v74, s85, 29
; GFX11-TRUE16-NEXT: v_writelane_b32 v74, s86, 30
; GFX11-TRUE16-NEXT: v_writelane_b32 v74, s87, 31
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB91_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB91_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 24
; GFX11-TRUE16-NEXT: s_lshr_b32 s83, s5, 24
@@ -162123,7 +159589,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: v_writelane_b32 v77, s42, 5
; GFX11-TRUE16-NEXT: s_lshr_b64 s[42:43], s[0:1], 24
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_lo
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB91_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB91_3
; GFX11-TRUE16-NEXT: .LBB91_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_lshl_b32 s42, s29, 16
; GFX11-TRUE16-NEXT: s_and_b32 s29, s29, 0xffff0000
@@ -162846,146 +160312,8 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 8, v6
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v178, 16, v5
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 8, v5
-; GFX11-TRUE16-NEXT: s_branch .LBB91_5
+; GFX11-TRUE16-NEXT: s_branch .LBB91_4
; GFX11-TRUE16-NEXT: .LBB91_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr36
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr37
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr34
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr104
-; GFX11-TRUE16-NEXT: ; implicit-def: $vcc_hi
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr48
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr49
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr38
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr103
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr35
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr52
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr53
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr50
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr102
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr39
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr64
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr65
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr54
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr101
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr51
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr68
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr69
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr66
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr100
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr55
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr80
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr81
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr70
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr99
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr67
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr84
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr85
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr82
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr98
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr71
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr87
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr96
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr86
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr97
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr83
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr30
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr94
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr92
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; kill: killed $sgpr43
-; GFX11-TRUE16-NEXT: s_branch .LBB91_2
-; GFX11-TRUE16-NEXT: .LBB91_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v112, s2 :: v_dual_mov_b32 v101, s0
; GFX11-TRUE16-NEXT: v_readlane_b32 s0, v76, 0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s41 :: v_dual_mov_b32 v145, s26
@@ -163132,7 +160460,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v62, s0
; GFX11-TRUE16-NEXT: v_readlane_b32 s0, v77, 7
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v63, s0
-; GFX11-TRUE16-NEXT: .LBB91_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB91_4: ; %end
; GFX11-TRUE16-NEXT: v_perm_b32 v2, v72, v48, 0xc0c0004
; GFX11-TRUE16-NEXT: v_perm_b32 v48, v101, v73, 0xc0c0004
; GFX11-TRUE16-NEXT: v_perm_b32 v101, v103, v62, 0xc0c0004
@@ -163431,7 +160759,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: v_writelane_b32 v74, s85, 29
; GFX11-FAKE16-NEXT: v_writelane_b32 v74, s86, 30
; GFX11-FAKE16-NEXT: v_writelane_b32 v74, s87, 31
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB91_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB91_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 24
; GFX11-FAKE16-NEXT: s_lshr_b32 s83, s5, 24
@@ -163581,7 +160909,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: v_writelane_b32 v77, s42, 5
; GFX11-FAKE16-NEXT: s_lshr_b64 s[42:43], s[0:1], 24
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_lo
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB91_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB91_3
; GFX11-FAKE16-NEXT: .LBB91_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s42, s29, 16
; GFX11-FAKE16-NEXT: s_and_b32 s29, s29, 0xffff0000
@@ -164308,146 +161636,8 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 8, v6
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v178, 16, v5
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 8, v5
-; GFX11-FAKE16-NEXT: s_branch .LBB91_5
+; GFX11-FAKE16-NEXT: s_branch .LBB91_4
; GFX11-FAKE16-NEXT: .LBB91_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr36
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr37
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr34
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr104
-; GFX11-FAKE16-NEXT: ; implicit-def: $vcc_hi
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr48
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr49
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr38
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr103
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr35
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr52
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr53
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr50
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr102
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr39
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr64
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr65
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr54
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr101
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr51
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr68
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr69
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr66
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr100
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr55
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr80
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr81
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr70
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr99
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr67
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr84
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr85
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr82
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr98
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr71
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr87
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr96
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr86
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr97
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr83
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr30
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr94
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr92
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; kill: killed $sgpr43
-; GFX11-FAKE16-NEXT: s_branch .LBB91_2
-; GFX11-FAKE16-NEXT: .LBB91_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v100, s2 :: v_dual_mov_b32 v97, s0
; GFX11-FAKE16-NEXT: v_readlane_b32 s0, v76, 0
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v50, s41
@@ -164595,7 +161785,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v62, s0
; GFX11-FAKE16-NEXT: v_readlane_b32 s0, v77, 7
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v63, s0
-; GFX11-FAKE16-NEXT: .LBB91_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB91_4: ; %end
; GFX11-FAKE16-NEXT: v_perm_b32 v2, v72, v36, 0xc0c0004
; GFX11-FAKE16-NEXT: v_perm_b32 v36, v97, v73, 0xc0c0004
; GFX11-FAKE16-NEXT: v_perm_b32 v97, v99, v62, 0xc0c0004
@@ -174597,7 +171787,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v31
; GFX11-TRUE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB93_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB93_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, 0xc0c0004 :: v_dual_lshlrev_b32 v13, 8, v86
@@ -174993,10 +172183,6 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:328
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB93_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB93_2
;
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v64f16_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -175120,7 +172306,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v31
; GFX11-FAKE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB93_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB93_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v8, 0xc0c0004
; GFX11-FAKE16-NEXT: s_and_b32 s76, s74, 0xff
@@ -175579,10 +172765,6 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32 offset:320 ; 4-byte Folded Reload
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB93_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB93_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -182109,7 +179291,7 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane
-; VI-NEXT: s_cbranch_scc0 .LBB95_3
+; VI-NEXT: s_cbranch_scc0 .LBB95_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s46, s5, 24
; VI-NEXT: v_writelane_b32 v62, s46, 16
@@ -182265,7 +179447,7 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; VI-NEXT: s_lshr_b64 s[36:37], s[40:41], 24
; VI-NEXT: s_lshr_b64 s[38:39], s[42:43], 24
; VI-NEXT: s_lshr_b64 s[48:49], s[44:45], 24
-; VI-NEXT: s_cbranch_execnz .LBB95_4
+; VI-NEXT: s_cbranch_execnz .LBB95_3
; VI-NEXT: .LBB95_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s46, s45, 16
; VI-NEXT: v_mov_b32_e32 v7, 0x200
@@ -182565,164 +179747,8 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_bfe_u32 v15, v49, 8, 8
; VI-NEXT: v_bfe_u32 v22, v36, 8, 8
; VI-NEXT: v_bfe_u32 v30, v43, 8, 8
-; VI-NEXT: s_branch .LBB95_5
+; VI-NEXT: s_branch .LBB95_4
; VI-NEXT: .LBB95_3:
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr71
-; VI-NEXT: ; implicit-def: $sgpr70
-; VI-NEXT: ; implicit-def: $sgpr51
-; VI-NEXT: ; implicit-def: $sgpr69
-; VI-NEXT: ; implicit-def: $sgpr68
-; VI-NEXT: ; implicit-def: $sgpr50
-; VI-NEXT: ; implicit-def: $sgpr67
-; VI-NEXT: ; implicit-def: $sgpr66
-; VI-NEXT: ; implicit-def: $sgpr83
-; VI-NEXT: ; implicit-def: $sgpr65
-; VI-NEXT: ; implicit-def: $sgpr64
-; VI-NEXT: ; implicit-def: $sgpr82
-; VI-NEXT: ; implicit-def: $sgpr55
-; VI-NEXT: ; implicit-def: $sgpr54
-; VI-NEXT: ; implicit-def: $sgpr81
-; VI-NEXT: ; implicit-def: $sgpr87
-; VI-NEXT: ; implicit-def: $sgpr86
-; VI-NEXT: ; implicit-def: $sgpr80
-; VI-NEXT: ; implicit-def: $sgpr85
-; VI-NEXT: ; implicit-def: $sgpr84
-; VI-NEXT: ; implicit-def: $sgpr53
-; VI-NEXT: ; implicit-def: $sgpr52
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr48
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB95_2
-; VI-NEXT: .LBB95_4:
; VI-NEXT: v_mov_b32_e32 v1, s44
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v1, s45
@@ -182978,7 +180004,7 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v8, s36
; VI-NEXT: v_mov_b32_e32 v24, s38
; VI-NEXT: v_mov_b32_e32 v14, s48
-; VI-NEXT: .LBB95_5: ; %end
+; VI-NEXT: .LBB95_4: ; %end
; VI-NEXT: s_mov_b32 s4, 0xc0c0004
; VI-NEXT: v_perm_b32 v20, v23, v20, s4
; VI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
@@ -183422,7 +180448,7 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane
-; GFX9-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB95_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s46, s5, 24
; GFX9-NEXT: v_writelane_b32 v62, s46, 49
@@ -183570,7 +180596,7 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b64 s[30:31], s[40:41], 24
; GFX9-NEXT: s_lshr_b64 s[34:35], s[42:43], 24
; GFX9-NEXT: s_lshr_b64 s[36:37], s[44:45], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB95_4
+; GFX9-NEXT: s_cbranch_execnz .LBB95_3
; GFX9-NEXT: .LBB95_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v15, 0x200
; GFX9-NEXT: v_pk_add_f16 v26, s5, v15 op_sel_hi:[1,0]
@@ -183797,156 +180823,8 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v22, 8, v19
; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
-; GFX9-NEXT: s_branch .LBB95_5
+; GFX9-NEXT: s_branch .LBB95_4
; GFX9-NEXT: .LBB95_3:
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr81
-; GFX9-NEXT: ; implicit-def: $sgpr71
-; GFX9-NEXT: ; implicit-def: $sgpr80
-; GFX9-NEXT: ; implicit-def: $sgpr70
-; GFX9-NEXT: ; implicit-def: $sgpr69
-; GFX9-NEXT: ; implicit-def: $sgpr68
-; GFX9-NEXT: ; implicit-def: $sgpr66
-; GFX9-NEXT: ; implicit-def: $sgpr67
-; GFX9-NEXT: ; implicit-def: $sgpr65
-; GFX9-NEXT: ; implicit-def: $sgpr64
-; GFX9-NEXT: ; implicit-def: $sgpr55
-; GFX9-NEXT: ; implicit-def: $sgpr53
-; GFX9-NEXT: ; implicit-def: $sgpr54
-; GFX9-NEXT: ; implicit-def: $sgpr52
-; GFX9-NEXT: ; implicit-def: $sgpr51
-; GFX9-NEXT: ; implicit-def: $sgpr50
-; GFX9-NEXT: ; implicit-def: $sgpr48
-; GFX9-NEXT: ; implicit-def: $sgpr49
-; GFX9-NEXT: ; implicit-def: $sgpr39
-; GFX9-NEXT: ; implicit-def: $sgpr38
-; GFX9-NEXT: ; implicit-def: $sgpr99
-; GFX9-NEXT: ; implicit-def: $sgpr97
-; GFX9-NEXT: ; implicit-def: $sgpr98
-; GFX9-NEXT: ; implicit-def: $sgpr96
-; GFX9-NEXT: ; implicit-def: $sgpr87
-; GFX9-NEXT: ; implicit-def: $sgpr86
-; GFX9-NEXT: ; implicit-def: $sgpr84
-; GFX9-NEXT: ; implicit-def: $sgpr85
-; GFX9-NEXT: ; implicit-def: $sgpr83
-; GFX9-NEXT: ; implicit-def: $sgpr82
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr36
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB95_2
-; GFX9-NEXT: .LBB95_4:
; GFX9-NEXT: v_mov_b32_e32 v15, s81
; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v15, s71
@@ -184224,7 +181102,7 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
-; GFX9-NEXT: .LBB95_5: ; %end
+; GFX9-NEXT: .LBB95_4: ; %end
; GFX9-NEXT: s_mov_b32 s4, 0xc0c0004
; GFX9-NEXT: v_perm_b32 v37, v37, v52, s4
; GFX9-NEXT: v_perm_b32 v31, v31, v53, s4
@@ -184667,7 +181545,7 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_writelane_b32 v74, s85, 29
; GFX11-NEXT: v_writelane_b32 v74, s86, 30
; GFX11-NEXT: v_writelane_b32 v74, s87, 31
-; GFX11-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB95_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s42, s27, 24
; GFX11-NEXT: s_lshr_b32 vcc_hi, s5, 24
@@ -184816,7 +181694,7 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_writelane_b32 v76, s42, 0
; GFX11-NEXT: s_lshr_b64 s[42:43], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_lo
-; GFX11-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB95_3
; GFX11-NEXT: .LBB95_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v28, 0x200, s19 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v18, 0x200, s27 op_sel_hi:[0,1]
@@ -184946,146 +181824,8 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_lshrrev_b32_e32 v132, 8, v16
; GFX11-NEXT: v_lshrrev_b32_e32 v145, 16, v15
; GFX11-NEXT: v_lshrrev_b32_e32 v134, 8, v15
-; GFX11-NEXT: s_branch .LBB95_5
+; GFX11-NEXT: s_branch .LBB95_4
; GFX11-NEXT: .LBB95_3:
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr104
-; GFX11-NEXT: ; implicit-def: $sgpr102
-; GFX11-NEXT: ; implicit-def: $sgpr103
-; GFX11-NEXT: ; implicit-def: $sgpr101
-; GFX11-NEXT: ; implicit-def: $sgpr100
-; GFX11-NEXT: ; implicit-def: $sgpr99
-; GFX11-NEXT: ; implicit-def: $sgpr97
-; GFX11-NEXT: ; implicit-def: $sgpr98
-; GFX11-NEXT: ; implicit-def: $sgpr96
-; GFX11-NEXT: ; implicit-def: $sgpr87
-; GFX11-NEXT: ; implicit-def: $sgpr86
-; GFX11-NEXT: ; implicit-def: $sgpr84
-; GFX11-NEXT: ; implicit-def: $sgpr85
-; GFX11-NEXT: ; implicit-def: $sgpr83
-; GFX11-NEXT: ; implicit-def: $sgpr82
-; GFX11-NEXT: ; implicit-def: $sgpr81
-; GFX11-NEXT: ; implicit-def: $sgpr71
-; GFX11-NEXT: ; implicit-def: $sgpr80
-; GFX11-NEXT: ; implicit-def: $sgpr70
-; GFX11-NEXT: ; implicit-def: $sgpr69
-; GFX11-NEXT: ; implicit-def: $sgpr68
-; GFX11-NEXT: ; implicit-def: $sgpr66
-; GFX11-NEXT: ; implicit-def: $sgpr67
-; GFX11-NEXT: ; implicit-def: $sgpr65
-; GFX11-NEXT: ; implicit-def: $sgpr64
-; GFX11-NEXT: ; implicit-def: $sgpr55
-; GFX11-NEXT: ; implicit-def: $sgpr53
-; GFX11-NEXT: ; implicit-def: $sgpr54
-; GFX11-NEXT: ; implicit-def: $sgpr52
-; GFX11-NEXT: ; implicit-def: $sgpr51
-; GFX11-NEXT: ; implicit-def: $sgpr50
-; GFX11-NEXT: ; implicit-def: $sgpr48
-; GFX11-NEXT: ; implicit-def: $sgpr49
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr37
-; GFX11-NEXT: ; implicit-def: $sgpr35
-; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: s_branch .LBB95_2
-; GFX11-NEXT: .LBB95_4:
; GFX11-NEXT: v_dual_mov_b32 v35, s0 :: v_dual_mov_b32 v36, s1
; GFX11-NEXT: v_readlane_b32 s0, v76, 0
; GFX11-NEXT: v_dual_mov_b32 v15, s28 :: v_dual_mov_b32 v16, s29
@@ -185231,7 +181971,7 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_mov_b32_e32 v147, s0
; GFX11-NEXT: v_readlane_b32 s0, v77, 7
; GFX11-NEXT: v_mov_b32_e32 v148, s0
-; GFX11-NEXT: .LBB95_5: ; %end
+; GFX11-NEXT: .LBB95_4: ; %end
; GFX11-NEXT: v_perm_b32 v68, v73, v70, 0xc0c0004
; GFX11-NEXT: v_perm_b32 v70, v62, v72, 0xc0c0004
; GFX11-NEXT: v_perm_b32 v69, v60, v69, 0xc0c0004
@@ -195232,7 +191972,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v31
; GFX11-TRUE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB97_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB97_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, 0xc0c0004 :: v_dual_lshlrev_b32 v13, 8, v86
@@ -195628,10 +192368,6 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:328
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB97_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB97_2
;
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v64i16_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -195755,7 +192491,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v31
; GFX11-FAKE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB97_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB97_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v8, 0xc0c0004
; GFX11-FAKE16-NEXT: s_and_b32 s76, s74, 0xff
@@ -196214,10 +192950,6 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32 offset:320 ; 4-byte Folded Reload
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB97_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB97_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -202738,7 +199470,7 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s44, v1
; VI-NEXT: v_writelane_b32 v32, s87, 31
; VI-NEXT: ; implicit-def: $vgpr33 : SGPR spill to VGPR lane
-; VI-NEXT: s_cbranch_scc0 .LBB99_4
+; VI-NEXT: s_cbranch_scc0 .LBB99_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s46, s45, 24
; VI-NEXT: v_writelane_b32 v33, s46, 0
@@ -203563,162 +200295,6 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB99_4:
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr71
-; VI-NEXT: ; implicit-def: $sgpr69
-; VI-NEXT: ; implicit-def: $sgpr67
-; VI-NEXT: ; implicit-def: $sgpr68
-; VI-NEXT: ; implicit-def: $sgpr65
-; VI-NEXT: ; implicit-def: $sgpr53
-; VI-NEXT: ; implicit-def: $sgpr50
-; VI-NEXT: ; implicit-def: $sgpr84
-; VI-NEXT: ; implicit-def: $sgpr83
-; VI-NEXT: ; implicit-def: $sgpr86
-; VI-NEXT: ; implicit-def: $sgpr64
-; VI-NEXT: ; implicit-def: $sgpr81
-; VI-NEXT: ; implicit-def: $sgpr66
-; VI-NEXT: ; implicit-def: $sgpr54
-; VI-NEXT: ; implicit-def: $sgpr87
-; VI-NEXT: ; implicit-def: $sgpr85
-; VI-NEXT: ; implicit-def: $sgpr55
-; VI-NEXT: ; implicit-def: $sgpr51
-; VI-NEXT: ; implicit-def: $sgpr80
-; VI-NEXT: ; implicit-def: $sgpr52
-; VI-NEXT: ; implicit-def: $sgpr82
-; VI-NEXT: ; implicit-def: $sgpr70
-; VI-NEXT: ; implicit-def: $sgpr48
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB99_2
;
; GFX9-LABEL: bitcast_v64i16_to_v128i8_scalar:
; GFX9: ; %bb.0:
@@ -203798,7 +200374,7 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane
-; GFX9-NEXT: s_cbranch_scc0 .LBB99_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB99_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s46, s5, 24
; GFX9-NEXT: v_writelane_b32 v62, s46, 49
@@ -203946,7 +200522,7 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: s_lshr_b64 s[30:31], s[40:41], 24
; GFX9-NEXT: s_lshr_b64 s[34:35], s[42:43], 24
; GFX9-NEXT: s_lshr_b64 s[36:37], s[44:45], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB99_4
+; GFX9-NEXT: s_cbranch_execnz .LBB99_3
; GFX9-NEXT: .LBB99_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v26, s5, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v25, s4, 3 op_sel_hi:[1,0]
@@ -204172,156 +200748,8 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v22, 8, v19
; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
-; GFX9-NEXT: s_branch .LBB99_5
+; GFX9-NEXT: s_branch .LBB99_4
; GFX9-NEXT: .LBB99_3:
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr81
-; GFX9-NEXT: ; implicit-def: $sgpr71
-; GFX9-NEXT: ; implicit-def: $sgpr80
-; GFX9-NEXT: ; implicit-def: $sgpr70
-; GFX9-NEXT: ; implicit-def: $sgpr69
-; GFX9-NEXT: ; implicit-def: $sgpr68
-; GFX9-NEXT: ; implicit-def: $sgpr66
-; GFX9-NEXT: ; implicit-def: $sgpr67
-; GFX9-NEXT: ; implicit-def: $sgpr65
-; GFX9-NEXT: ; implicit-def: $sgpr64
-; GFX9-NEXT: ; implicit-def: $sgpr55
-; GFX9-NEXT: ; implicit-def: $sgpr53
-; GFX9-NEXT: ; implicit-def: $sgpr54
-; GFX9-NEXT: ; implicit-def: $sgpr52
-; GFX9-NEXT: ; implicit-def: $sgpr51
-; GFX9-NEXT: ; implicit-def: $sgpr50
-; GFX9-NEXT: ; implicit-def: $sgpr48
-; GFX9-NEXT: ; implicit-def: $sgpr49
-; GFX9-NEXT: ; implicit-def: $sgpr39
-; GFX9-NEXT: ; implicit-def: $sgpr38
-; GFX9-NEXT: ; implicit-def: $sgpr99
-; GFX9-NEXT: ; implicit-def: $sgpr97
-; GFX9-NEXT: ; implicit-def: $sgpr98
-; GFX9-NEXT: ; implicit-def: $sgpr96
-; GFX9-NEXT: ; implicit-def: $sgpr87
-; GFX9-NEXT: ; implicit-def: $sgpr86
-; GFX9-NEXT: ; implicit-def: $sgpr84
-; GFX9-NEXT: ; implicit-def: $sgpr85
-; GFX9-NEXT: ; implicit-def: $sgpr83
-; GFX9-NEXT: ; implicit-def: $sgpr82
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr36
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB99_2
-; GFX9-NEXT: .LBB99_4:
; GFX9-NEXT: v_mov_b32_e32 v15, s81
; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v15, s71
@@ -204599,7 +201027,7 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
-; GFX9-NEXT: .LBB99_5: ; %end
+; GFX9-NEXT: .LBB99_4: ; %end
; GFX9-NEXT: s_mov_b32 s4, 0xc0c0004
; GFX9-NEXT: v_perm_b32 v37, v37, v52, s4
; GFX9-NEXT: v_perm_b32 v31, v31, v53, s4
@@ -205042,7 +201470,7 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_writelane_b32 v74, s85, 29
; GFX11-NEXT: v_writelane_b32 v74, s86, 30
; GFX11-NEXT: v_writelane_b32 v74, s87, 31
-; GFX11-NEXT: s_cbranch_scc0 .LBB99_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB99_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s42, s27, 24
; GFX11-NEXT: s_lshr_b32 vcc_hi, s5, 24
@@ -205191,7 +201619,7 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_writelane_b32 v76, s42, 0
; GFX11-NEXT: s_lshr_b64 s[42:43], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_lo
-; GFX11-NEXT: s_cbranch_vccnz .LBB99_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB99_3
; GFX11-NEXT: .LBB99_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v28, s19, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v18, s27, 3 op_sel_hi:[1,0]
@@ -205321,146 +201749,8 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_lshrrev_b32_e32 v132, 8, v16
; GFX11-NEXT: v_lshrrev_b32_e32 v145, 16, v15
; GFX11-NEXT: v_lshrrev_b32_e32 v134, 8, v15
-; GFX11-NEXT: s_branch .LBB99_5
+; GFX11-NEXT: s_branch .LBB99_4
; GFX11-NEXT: .LBB99_3:
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr104
-; GFX11-NEXT: ; implicit-def: $sgpr102
-; GFX11-NEXT: ; implicit-def: $sgpr103
-; GFX11-NEXT: ; implicit-def: $sgpr101
-; GFX11-NEXT: ; implicit-def: $sgpr100
-; GFX11-NEXT: ; implicit-def: $sgpr99
-; GFX11-NEXT: ; implicit-def: $sgpr97
-; GFX11-NEXT: ; implicit-def: $sgpr98
-; GFX11-NEXT: ; implicit-def: $sgpr96
-; GFX11-NEXT: ; implicit-def: $sgpr87
-; GFX11-NEXT: ; implicit-def: $sgpr86
-; GFX11-NEXT: ; implicit-def: $sgpr84
-; GFX11-NEXT: ; implicit-def: $sgpr85
-; GFX11-NEXT: ; implicit-def: $sgpr83
-; GFX11-NEXT: ; implicit-def: $sgpr82
-; GFX11-NEXT: ; implicit-def: $sgpr81
-; GFX11-NEXT: ; implicit-def: $sgpr71
-; GFX11-NEXT: ; implicit-def: $sgpr80
-; GFX11-NEXT: ; implicit-def: $sgpr70
-; GFX11-NEXT: ; implicit-def: $sgpr69
-; GFX11-NEXT: ; implicit-def: $sgpr68
-; GFX11-NEXT: ; implicit-def: $sgpr66
-; GFX11-NEXT: ; implicit-def: $sgpr67
-; GFX11-NEXT: ; implicit-def: $sgpr65
-; GFX11-NEXT: ; implicit-def: $sgpr64
-; GFX11-NEXT: ; implicit-def: $sgpr55
-; GFX11-NEXT: ; implicit-def: $sgpr53
-; GFX11-NEXT: ; implicit-def: $sgpr54
-; GFX11-NEXT: ; implicit-def: $sgpr52
-; GFX11-NEXT: ; implicit-def: $sgpr51
-; GFX11-NEXT: ; implicit-def: $sgpr50
-; GFX11-NEXT: ; implicit-def: $sgpr48
-; GFX11-NEXT: ; implicit-def: $sgpr49
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr37
-; GFX11-NEXT: ; implicit-def: $sgpr35
-; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: s_branch .LBB99_2
-; GFX11-NEXT: .LBB99_4:
; GFX11-NEXT: v_dual_mov_b32 v35, s0 :: v_dual_mov_b32 v36, s1
; GFX11-NEXT: v_readlane_b32 s0, v76, 0
; GFX11-NEXT: v_dual_mov_b32 v15, s28 :: v_dual_mov_b32 v16, s29
@@ -205606,7 +201896,7 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_mov_b32_e32 v147, s0
; GFX11-NEXT: v_readlane_b32 s0, v77, 7
; GFX11-NEXT: v_mov_b32_e32 v148, s0
-; GFX11-NEXT: .LBB99_5: ; %end
+; GFX11-NEXT: .LBB99_4: ; %end
; GFX11-NEXT: v_perm_b32 v68, v73, v70, 0xc0c0004
; GFX11-NEXT: v_perm_b32 v70, v62, v72, 0xc0c0004
; GFX11-NEXT: v_perm_b32 v69, v60, v69, 0xc0c0004
@@ -210912,9 +207202,9 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB101_3
+; VI-NEXT: s_cbranch_scc0 .LBB101_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB101_4
+; VI-NEXT: s_cbranch_execnz .LBB101_3
; VI-NEXT: .LBB101_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v32, 0x40c00000
; VI-NEXT: s_lshl_b32 s4, s48, 16
@@ -211508,10 +207798,8 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; VI-NEXT: v_cndmask_b32_e32 v31, v32, v34, vcc
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v33
; VI-NEXT: v_lshrrev_b64 v[31:32], 16, v[31:32]
-; VI-NEXT: s_branch .LBB101_5
+; VI-NEXT: s_branch .LBB101_4
; VI-NEXT: .LBB101_3:
-; VI-NEXT: s_branch .LBB101_2
-; VI-NEXT: .LBB101_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v16, s36
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -211544,7 +207832,7 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; VI-NEXT: v_mov_b32_e32 v29, s49
; VI-NEXT: v_mov_b32_e32 v30, s50
; VI-NEXT: v_mov_b32_e32 v31, s51
-; VI-NEXT: .LBB101_5: ; %end
+; VI-NEXT: .LBB101_4: ; %end
; VI-NEXT: v_readlane_b32 s51, v35, 9
; VI-NEXT: v_readlane_b32 s50, v35, 8
; VI-NEXT: v_readlane_b32 s49, v35, 7
@@ -211597,9 +207885,9 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB101_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB101_4
+; GFX9-NEXT: s_cbranch_execnz .LBB101_3
; GFX9-NEXT: .LBB101_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v16, 0x40c00000
; GFX9-NEXT: s_and_b32 s4, s50, 0xffff0000
@@ -212211,10 +208499,8 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; GFX9-NEXT: v_and_b32_sdwa v16, v32, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v33
; GFX9-NEXT: v_lshl_or_b32 v16, v32, 16, v16
-; GFX9-NEXT: s_branch .LBB101_5
+; GFX9-NEXT: s_branch .LBB101_4
; GFX9-NEXT: .LBB101_3:
-; GFX9-NEXT: s_branch .LBB101_2
-; GFX9-NEXT: .LBB101_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v16, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -212247,7 +208533,7 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; GFX9-NEXT: v_mov_b32_e32 v29, s49
; GFX9-NEXT: v_mov_b32_e32 v30, s50
; GFX9-NEXT: v_mov_b32_e32 v31, s51
-; GFX9-NEXT: .LBB101_5: ; %end
+; GFX9-NEXT: .LBB101_4: ; %end
; GFX9-NEXT: v_readlane_b32 s51, v36, 9
; GFX9-NEXT: v_readlane_b32 s50, v36, 8
; GFX9-NEXT: v_readlane_b32 s49, v36, 7
@@ -212301,10 +208587,10 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s50, v12
; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s51, 7
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s51, v13
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB101_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB101_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB101_3
; GFX11-TRUE16-NEXT: .LBB101_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s0, s36, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s36, 16
@@ -212919,10 +209205,8 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v64.l
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v84
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v87.l
-; GFX11-TRUE16-NEXT: s_branch .LBB101_5
+; GFX11-TRUE16-NEXT: s_branch .LBB101_4
; GFX11-TRUE16-NEXT: .LBB101_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB101_2
-; GFX11-TRUE16-NEXT: .LBB101_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s36 :: v_dual_mov_b32 v17, s37
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -212939,7 +209223,7 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s46 :: v_dual_mov_b32 v27, s47
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s48 :: v_dual_mov_b32 v29, s49
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s50 :: v_dual_mov_b32 v31, s51
-; GFX11-TRUE16-NEXT: .LBB101_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB101_4: ; %end
; GFX11-TRUE16-NEXT: v_readlane_b32 s51, v40, 7
; GFX11-TRUE16-NEXT: v_readlane_b32 s50, v40, 6
; GFX11-TRUE16-NEXT: v_readlane_b32 s49, v40, 5
@@ -212991,10 +209275,10 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s50, v12
; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s51, 7
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s51, v13
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB101_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB101_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB101_3
; GFX11-FAKE16-NEXT: .LBB101_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_and_b32 s0, s36, 0xffff0000
; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s36, 16
@@ -213655,10 +209939,8 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v38
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v32, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v16, 16, v48
-; GFX11-FAKE16-NEXT: s_branch .LBB101_5
+; GFX11-FAKE16-NEXT: s_branch .LBB101_4
; GFX11-FAKE16-NEXT: .LBB101_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB101_2
-; GFX11-FAKE16-NEXT: .LBB101_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s36 :: v_dual_mov_b32 v17, s37
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -213675,7 +209957,7 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s46 :: v_dual_mov_b32 v27, s47
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s48 :: v_dual_mov_b32 v29, s49
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s50 :: v_dual_mov_b32 v31, s51
-; GFX11-FAKE16-NEXT: .LBB101_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB101_4: ; %end
; GFX11-FAKE16-NEXT: v_readlane_b32 s51, v40, 7
; GFX11-FAKE16-NEXT: v_readlane_b32 s50, v40, 6
; GFX11-FAKE16-NEXT: v_readlane_b32 s49, v40, 5
@@ -215153,7 +211435,7 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 ; 4-byte Folded Spill
-; SI-NEXT: s_cbranch_scc0 .LBB103_3
+; SI-NEXT: s_cbranch_scc0 .LBB103_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s4, s16, 16
; SI-NEXT: v_writelane_b32 v62, s4, 35
@@ -215246,7 +211528,7 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; SI-NEXT: s_lshl_b32 s97, s51, 16
; SI-NEXT: s_lshl_b32 s98, s52, 16
; SI-NEXT: s_lshl_b32 s14, s55, 16
-; SI-NEXT: s_cbranch_execnz .LBB103_4
+; SI-NEXT: s_cbranch_execnz .LBB103_3
; SI-NEXT: .LBB103_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s55
; SI-NEXT: v_cvt_f32_f16_e32 v1, s52
@@ -215570,101 +211852,8 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; SI-NEXT: s_branch .LBB103_5
+; SI-NEXT: s_branch .LBB103_4
; SI-NEXT: .LBB103_3:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr99
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr65
-; SI-NEXT: ; implicit-def: $sgpr66
-; SI-NEXT: ; implicit-def: $sgpr67
-; SI-NEXT: ; implicit-def: $sgpr68
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr69
-; SI-NEXT: ; implicit-def: $sgpr70
-; SI-NEXT: ; implicit-def: $sgpr71
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr80
-; SI-NEXT: ; implicit-def: $sgpr81
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr82
-; SI-NEXT: ; implicit-def: $sgpr83
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr84
-; SI-NEXT: ; implicit-def: $sgpr85
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr86
-; SI-NEXT: ; implicit-def: $sgpr87
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr96
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr97
-; SI-NEXT: ; implicit-def: $sgpr98
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB103_2
-; SI-NEXT: .LBB103_4:
; SI-NEXT: v_readlane_b32 s4, v62, 9
; SI-NEXT: v_mov_b32_e32 v26, s4
; SI-NEXT: v_readlane_b32 s4, v62, 10
@@ -215770,7 +211959,7 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; SI-NEXT: v_mov_b32_e32 v27, s99
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: .LBB103_5: ; %end
+; SI-NEXT: .LBB103_4: ; %end
; SI-NEXT: v_mul_f32_e32 v34, 1.0, v0
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v3
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v0
@@ -216029,9 +212218,9 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB103_3
+; VI-NEXT: s_cbranch_scc0 .LBB103_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB103_4
+; VI-NEXT: s_cbranch_execnz .LBB103_3
; VI-NEXT: .LBB103_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s50, 16
; VI-NEXT: v_mov_b32_e32 v0, s4
@@ -216194,10 +212383,8 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; VI-NEXT: v_add_f16_sdwa v32, v32, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v16, s36, v16
; VI-NEXT: v_or_b32_e32 v16, v16, v32
-; VI-NEXT: s_branch .LBB103_5
+; VI-NEXT: s_branch .LBB103_4
; VI-NEXT: .LBB103_3:
-; VI-NEXT: s_branch .LBB103_2
-; VI-NEXT: .LBB103_4:
; VI-NEXT: v_mov_b32_e32 v16, s36
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v17, s37
@@ -216230,7 +212417,7 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB103_5: ; %end
+; VI-NEXT: .LBB103_4: ; %end
; VI-NEXT: v_readlane_b32 s51, v33, 9
; VI-NEXT: v_readlane_b32 s50, v33, 8
; VI-NEXT: v_readlane_b32 s49, v33, 7
@@ -216283,9 +212470,9 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB103_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB103_4
+; GFX9-NEXT: s_cbranch_execnz .LBB103_3
; GFX9-NEXT: .LBB103_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v16, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, s31, v16 op_sel_hi:[1,0]
@@ -216320,10 +212507,8 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; GFX9-NEXT: v_pk_add_f16 v18, s38, v16 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v17, s37, v16 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v16, s36, v16 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB103_5
+; GFX9-NEXT: s_branch .LBB103_4
; GFX9-NEXT: .LBB103_3:
-; GFX9-NEXT: s_branch .LBB103_2
-; GFX9-NEXT: .LBB103_4:
; GFX9-NEXT: v_mov_b32_e32 v16, s36
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v17, s37
@@ -216356,7 +212541,7 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB103_5: ; %end
+; GFX9-NEXT: .LBB103_4: ; %end
; GFX9-NEXT: v_readlane_b32 s51, v32, 9
; GFX9-NEXT: v_readlane_b32 s50, v32, 8
; GFX9-NEXT: v_readlane_b32 s49, v32, 7
@@ -216410,10 +212595,10 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; GFX11-NEXT: v_readfirstlane_b32 s50, v12
; GFX11-NEXT: v_writelane_b32 v32, s51, 7
; GFX11-NEXT: v_readfirstlane_b32 s51, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB103_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB103_3
; GFX11-NEXT: .LBB103_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
@@ -216447,10 +212632,8 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; GFX11-NEXT: v_pk_add_f16 v18, 0x200, s38 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s37 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v16, 0x200, s36 op_sel_hi:[0,1]
-; GFX11-NEXT: s_branch .LBB103_5
+; GFX11-NEXT: s_branch .LBB103_4
; GFX11-NEXT: .LBB103_3:
-; GFX11-NEXT: s_branch .LBB103_2
-; GFX11-NEXT: .LBB103_4:
; GFX11-NEXT: v_dual_mov_b32 v16, s36 :: v_dual_mov_b32 v17, s37
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v18, s38 :: v_dual_mov_b32 v19, s39
@@ -216467,7 +212650,7 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: .LBB103_5: ; %end
+; GFX11-NEXT: .LBB103_4: ; %end
; GFX11-NEXT: v_readlane_b32 s51, v32, 7
; GFX11-NEXT: v_readlane_b32 s50, v32, 6
; GFX11-NEXT: v_readlane_b32 s49, v32, 5
@@ -221090,9 +217273,9 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB105_3
+; VI-NEXT: s_cbranch_scc0 .LBB105_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB105_4
+; VI-NEXT: s_cbranch_execnz .LBB105_3
; VI-NEXT: .LBB105_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v32, 0x40c00000
; VI-NEXT: s_lshl_b32 s4, s48, 16
@@ -221686,10 +217869,8 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_cndmask_b32_e32 v31, v32, v34, vcc
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v33
; VI-NEXT: v_lshrrev_b64 v[31:32], 16, v[31:32]
-; VI-NEXT: s_branch .LBB105_5
+; VI-NEXT: s_branch .LBB105_4
; VI-NEXT: .LBB105_3:
-; VI-NEXT: s_branch .LBB105_2
-; VI-NEXT: .LBB105_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v16, s36
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -221722,7 +217903,7 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_mov_b32_e32 v29, s49
; VI-NEXT: v_mov_b32_e32 v30, s50
; VI-NEXT: v_mov_b32_e32 v31, s51
-; VI-NEXT: .LBB105_5: ; %end
+; VI-NEXT: .LBB105_4: ; %end
; VI-NEXT: v_readlane_b32 s51, v35, 9
; VI-NEXT: v_readlane_b32 s50, v35, 8
; VI-NEXT: v_readlane_b32 s49, v35, 7
@@ -221775,9 +217956,9 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB105_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB105_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB105_4
+; GFX9-NEXT: s_cbranch_execnz .LBB105_3
; GFX9-NEXT: .LBB105_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v16, 0x40c00000
; GFX9-NEXT: s_and_b32 s4, s50, 0xffff0000
@@ -222357,10 +218538,8 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_cndmask_b32_e32 v16, v34, v35, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v16
; GFX9-NEXT: v_and_or_b32 v16, v33, v32, v16
-; GFX9-NEXT: s_branch .LBB105_5
+; GFX9-NEXT: s_branch .LBB105_4
; GFX9-NEXT: .LBB105_3:
-; GFX9-NEXT: s_branch .LBB105_2
-; GFX9-NEXT: .LBB105_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v16, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -222393,7 +218572,7 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v29, s49
; GFX9-NEXT: v_mov_b32_e32 v30, s50
; GFX9-NEXT: v_mov_b32_e32 v31, s51
-; GFX9-NEXT: .LBB105_5: ; %end
+; GFX9-NEXT: .LBB105_4: ; %end
; GFX9-NEXT: v_readlane_b32 s51, v36, 9
; GFX9-NEXT: v_readlane_b32 s50, v36, 8
; GFX9-NEXT: v_readlane_b32 s49, v36, 7
@@ -222447,10 +218626,10 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s50, v12
; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s51, 7
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s51, v13
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB105_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB105_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB105_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB105_3
; GFX11-TRUE16-NEXT: .LBB105_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s0, s36, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s36, 16
@@ -222990,10 +219169,8 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v35.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v33.h
-; GFX11-TRUE16-NEXT: s_branch .LBB105_5
+; GFX11-TRUE16-NEXT: s_branch .LBB105_4
; GFX11-TRUE16-NEXT: .LBB105_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB105_2
-; GFX11-TRUE16-NEXT: .LBB105_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s36 :: v_dual_mov_b32 v17, s37
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -223010,7 +219187,7 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s46 :: v_dual_mov_b32 v27, s47
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s48 :: v_dual_mov_b32 v29, s49
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s50 :: v_dual_mov_b32 v31, s51
-; GFX11-TRUE16-NEXT: .LBB105_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB105_4: ; %end
; GFX11-TRUE16-NEXT: v_readlane_b32 s51, v40, 7
; GFX11-TRUE16-NEXT: v_readlane_b32 s50, v40, 6
; GFX11-TRUE16-NEXT: v_readlane_b32 s49, v40, 5
@@ -223062,10 +219239,10 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s50, v12
; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s51, 7
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s51, v13
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB105_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB105_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB105_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB105_3
; GFX11-FAKE16-NEXT: .LBB105_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s36, 16
; GFX11-FAKE16-NEXT: s_and_b32 s0, s36, 0xffff0000
@@ -223651,10 +219828,8 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: v_and_or_b32 v18, 0xffff0000, v33, v38
; GFX11-FAKE16-NEXT: v_and_or_b32 v17, 0xffff0000, v32, v39
; GFX11-FAKE16-NEXT: v_and_or_b32 v16, 0xffff0000, v16, v48
-; GFX11-FAKE16-NEXT: s_branch .LBB105_5
+; GFX11-FAKE16-NEXT: s_branch .LBB105_4
; GFX11-FAKE16-NEXT: .LBB105_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB105_2
-; GFX11-FAKE16-NEXT: .LBB105_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s36 :: v_dual_mov_b32 v17, s37
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -223671,7 +219846,7 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s46 :: v_dual_mov_b32 v27, s47
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s48 :: v_dual_mov_b32 v29, s49
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s50 :: v_dual_mov_b32 v31, s51
-; GFX11-FAKE16-NEXT: .LBB105_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB105_4: ; %end
; GFX11-FAKE16-NEXT: v_readlane_b32 s51, v40, 7
; GFX11-FAKE16-NEXT: v_readlane_b32 s50, v40, 6
; GFX11-FAKE16-NEXT: v_readlane_b32 s49, v40, 5
@@ -225491,7 +221666,7 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
; VI-NEXT: v_writelane_b32 v32, s39, 7
-; VI-NEXT: s_cbranch_scc0 .LBB107_4
+; VI-NEXT: s_cbranch_scc0 .LBB107_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB107_3
; VI-NEXT: .LBB107_2: ; %cmp.true
@@ -225701,8 +221876,6 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB107_4:
-; VI-NEXT: s_branch .LBB107_2
;
; GFX9-LABEL: bitcast_v64i16_to_v64bf16_scalar:
; GFX9: ; %bb.0:
@@ -225740,9 +221913,9 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB107_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB107_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB107_4
+; GFX9-NEXT: s_cbranch_execnz .LBB107_3
; GFX9-NEXT: .LBB107_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, s31, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, s30, 3 op_sel_hi:[1,0]
@@ -225776,10 +221949,8 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; GFX9-NEXT: v_pk_add_u16 v18, s38, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v17, s37, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v16, s36, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB107_5
+; GFX9-NEXT: s_branch .LBB107_4
; GFX9-NEXT: .LBB107_3:
-; GFX9-NEXT: s_branch .LBB107_2
-; GFX9-NEXT: .LBB107_4:
; GFX9-NEXT: v_mov_b32_e32 v16, s36
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v17, s37
@@ -225812,7 +221983,7 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB107_5: ; %end
+; GFX9-NEXT: .LBB107_4: ; %end
; GFX9-NEXT: v_readlane_b32 s51, v32, 9
; GFX9-NEXT: v_readlane_b32 s50, v32, 8
; GFX9-NEXT: v_readlane_b32 s49, v32, 7
@@ -225866,10 +222037,10 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; GFX11-NEXT: v_readfirstlane_b32 s50, v12
; GFX11-NEXT: v_writelane_b32 v32, s51, 7
; GFX11-NEXT: v_readfirstlane_b32 s51, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB107_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB107_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB107_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB107_3
; GFX11-NEXT: .LBB107_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
@@ -225903,10 +222074,8 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; GFX11-NEXT: v_pk_add_u16 v18, s38, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v17, s37, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v16, s36, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: s_branch .LBB107_5
+; GFX11-NEXT: s_branch .LBB107_4
; GFX11-NEXT: .LBB107_3:
-; GFX11-NEXT: s_branch .LBB107_2
-; GFX11-NEXT: .LBB107_4:
; GFX11-NEXT: v_dual_mov_b32 v16, s36 :: v_dual_mov_b32 v17, s37
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v18, s38 :: v_dual_mov_b32 v19, s39
@@ -225923,7 +222092,7 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: .LBB107_5: ; %end
+; GFX11-NEXT: .LBB107_4: ; %end
; GFX11-NEXT: v_readlane_b32 s51, v32, 7
; GFX11-NEXT: v_readlane_b32 s50, v32, 6
; GFX11-NEXT: v_readlane_b32 s49, v32, 5
@@ -226751,9 +222920,9 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill
-; SI-NEXT: s_cbranch_scc0 .LBB109_3
+; SI-NEXT: s_cbranch_scc0 .LBB109_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB109_4
+; SI-NEXT: s_cbranch_execnz .LBB109_3
; SI-NEXT: .LBB109_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s39
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -227090,10 +223259,8 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v50, v7
-; SI-NEXT: s_branch .LBB109_5
+; SI-NEXT: s_branch .LBB109_4
; SI-NEXT: .LBB109_3:
-; SI-NEXT: s_branch .LBB109_2
-; SI-NEXT: .LBB109_4:
; SI-NEXT: v_mov_b32_e32 v6, s20
; SI-NEXT: v_mov_b32_e32 v7, s25
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
@@ -227177,7 +223344,7 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v7, s77
; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; SI-NEXT: .LBB109_5: ; %end
+; SI-NEXT: .LBB109_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
@@ -227362,9 +223529,9 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB109_3
+; VI-NEXT: s_cbranch_scc0 .LBB109_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB109_4
+; VI-NEXT: s_cbranch_execnz .LBB109_3
; VI-NEXT: .LBB109_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s50, 16
; VI-NEXT: v_mov_b32_e32 v0, s4
@@ -227527,10 +223694,8 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_add_f16_sdwa v32, v32, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v16, s36, v16
; VI-NEXT: v_or_b32_e32 v16, v16, v32
-; VI-NEXT: s_branch .LBB109_5
+; VI-NEXT: s_branch .LBB109_4
; VI-NEXT: .LBB109_3:
-; VI-NEXT: s_branch .LBB109_2
-; VI-NEXT: .LBB109_4:
; VI-NEXT: v_mov_b32_e32 v16, s36
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v17, s37
@@ -227563,7 +223728,7 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB109_5: ; %end
+; VI-NEXT: .LBB109_4: ; %end
; VI-NEXT: v_readlane_b32 s51, v33, 9
; VI-NEXT: v_readlane_b32 s50, v33, 8
; VI-NEXT: v_readlane_b32 s49, v33, 7
@@ -227616,9 +223781,9 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB109_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB109_4
+; GFX9-NEXT: s_cbranch_execnz .LBB109_3
; GFX9-NEXT: .LBB109_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v16, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, s31, v16 op_sel_hi:[1,0]
@@ -227653,10 +223818,8 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v18, s38, v16 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v17, s37, v16 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v16, s36, v16 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB109_5
+; GFX9-NEXT: s_branch .LBB109_4
; GFX9-NEXT: .LBB109_3:
-; GFX9-NEXT: s_branch .LBB109_2
-; GFX9-NEXT: .LBB109_4:
; GFX9-NEXT: v_mov_b32_e32 v16, s36
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v17, s37
@@ -227689,7 +223852,7 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB109_5: ; %end
+; GFX9-NEXT: .LBB109_4: ; %end
; GFX9-NEXT: v_readlane_b32 s51, v32, 9
; GFX9-NEXT: v_readlane_b32 s50, v32, 8
; GFX9-NEXT: v_readlane_b32 s49, v32, 7
@@ -227743,10 +223906,10 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s50, v12
; GFX11-NEXT: v_writelane_b32 v32, s51, 7
; GFX11-NEXT: v_readfirstlane_b32 s51, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB109_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB109_3
; GFX11-NEXT: .LBB109_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
@@ -227780,10 +223943,8 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v18, 0x200, s38 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s37 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v16, 0x200, s36 op_sel_hi:[0,1]
-; GFX11-NEXT: s_branch .LBB109_5
+; GFX11-NEXT: s_branch .LBB109_4
; GFX11-NEXT: .LBB109_3:
-; GFX11-NEXT: s_branch .LBB109_2
-; GFX11-NEXT: .LBB109_4:
; GFX11-NEXT: v_dual_mov_b32 v16, s36 :: v_dual_mov_b32 v17, s37
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v18, s38 :: v_dual_mov_b32 v19, s39
@@ -227800,7 +223961,7 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: .LBB109_5: ; %end
+; GFX11-NEXT: .LBB109_4: ; %end
; GFX11-NEXT: v_readlane_b32 s51, v32, 7
; GFX11-NEXT: v_readlane_b32 s50, v32, 6
; GFX11-NEXT: v_readlane_b32 s49, v32, 5
@@ -229855,7 +226016,7 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
; VI-NEXT: v_writelane_b32 v32, s39, 7
-; VI-NEXT: s_cbranch_scc0 .LBB111_4
+; VI-NEXT: s_cbranch_scc0 .LBB111_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB111_3
; VI-NEXT: .LBB111_2: ; %cmp.true
@@ -230065,8 +226226,6 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB111_4:
-; VI-NEXT: s_branch .LBB111_2
;
; GFX9-LABEL: bitcast_v64i16_to_v64f16_scalar:
; GFX9: ; %bb.0:
@@ -230104,9 +226263,9 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB111_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB111_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB111_4
+; GFX9-NEXT: s_cbranch_execnz .LBB111_3
; GFX9-NEXT: .LBB111_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, s31, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, s30, 3 op_sel_hi:[1,0]
@@ -230140,10 +226299,8 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v18, s38, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v17, s37, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v16, s36, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB111_5
+; GFX9-NEXT: s_branch .LBB111_4
; GFX9-NEXT: .LBB111_3:
-; GFX9-NEXT: s_branch .LBB111_2
-; GFX9-NEXT: .LBB111_4:
; GFX9-NEXT: v_mov_b32_e32 v16, s36
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v17, s37
@@ -230176,7 +226333,7 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB111_5: ; %end
+; GFX9-NEXT: .LBB111_4: ; %end
; GFX9-NEXT: v_readlane_b32 s51, v32, 9
; GFX9-NEXT: v_readlane_b32 s50, v32, 8
; GFX9-NEXT: v_readlane_b32 s49, v32, 7
@@ -230230,10 +226387,10 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s50, v12
; GFX11-NEXT: v_writelane_b32 v32, s51, 7
; GFX11-NEXT: v_readfirstlane_b32 s51, v13
-; GFX11-NEXT: s_cbranch_scc0 .LBB111_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB111_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB111_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB111_3
; GFX11-NEXT: .LBB111_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
@@ -230267,10 +226424,8 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v18, s38, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v17, s37, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v16, s36, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: s_branch .LBB111_5
+; GFX11-NEXT: s_branch .LBB111_4
; GFX11-NEXT: .LBB111_3:
-; GFX11-NEXT: s_branch .LBB111_2
-; GFX11-NEXT: .LBB111_4:
; GFX11-NEXT: v_dual_mov_b32 v16, s36 :: v_dual_mov_b32 v17, s37
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v18, s38 :: v_dual_mov_b32 v19, s39
@@ -230287,7 +226442,7 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: .LBB111_5: ; %end
+; GFX11-NEXT: .LBB111_4: ; %end
; GFX11-NEXT: v_readlane_b32 s51, v32, 7
; GFX11-NEXT: v_readlane_b32 s50, v32, 6
; GFX11-NEXT: v_readlane_b32 s49, v32, 5
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll
index 94f4be1c32418..433dc8eb7f82f 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll
@@ -93,7 +93,7 @@ define inreg <4 x float> @bitcast_v4i32_to_v4f32_scalar(<4 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -107,14 +107,12 @@ define inreg <4 x float> @bitcast_v4i32_to_v4f32_scalar(<4 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v4i32_to_v4f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -128,14 +126,12 @@ define inreg <4 x float> @bitcast_v4i32_to_v4f32_scalar(<4 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v4i32_to_v4f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -149,15 +145,13 @@ define inreg <4 x float> @bitcast_v4i32_to_v4f32_scalar(<4 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v4i32_to_v4f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -171,8 +165,6 @@ define inreg <4 x float> @bitcast_v4i32_to_v4f32_scalar(<4 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -275,9 +267,9 @@ define inreg <4 x i32> @bitcast_v4f32_to_v4i32_scalar(<4 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v3, s19, 1.0
; SI-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -285,8 +277,6 @@ define inreg <4 x i32> @bitcast_v4f32_to_v4i32_scalar(<4 x float> inreg %a, i32
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -297,9 +287,9 @@ define inreg <4 x i32> @bitcast_v4f32_to_v4i32_scalar(<4 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -307,8 +297,6 @@ define inreg <4 x i32> @bitcast_v4f32_to_v4i32_scalar(<4 x float> inreg %a, i32
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -319,9 +307,9 @@ define inreg <4 x i32> @bitcast_v4f32_to_v4i32_scalar(<4 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -329,8 +317,6 @@ define inreg <4 x i32> @bitcast_v4f32_to_v4i32_scalar(<4 x float> inreg %a, i32
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -342,10 +328,10 @@ define inreg <4 x i32> @bitcast_v4f32_to_v4i32_scalar(<4 x float> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
@@ -353,8 +339,6 @@ define inreg <4 x i32> @bitcast_v4f32_to_v4i32_scalar(<4 x float> inreg %a, i32
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -462,7 +446,7 @@ define inreg <2 x i64> @bitcast_v4i32_to_v2i64_scalar(<4 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB5_3
; SI-NEXT: .LBB5_2: ; %cmp.true
@@ -476,14 +460,12 @@ define inreg <2 x i64> @bitcast_v4i32_to_v2i64_scalar(<4 x i32> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v4i32_to_v2i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -497,14 +479,12 @@ define inreg <2 x i64> @bitcast_v4i32_to_v2i64_scalar(<4 x i32> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v4i32_to_v2i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -518,15 +498,13 @@ define inreg <2 x i64> @bitcast_v4i32_to_v2i64_scalar(<4 x i32> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v4i32_to_v2i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -540,8 +518,6 @@ define inreg <2 x i64> @bitcast_v4i32_to_v2i64_scalar(<4 x i32> inreg %a, i32 in
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -647,7 +623,7 @@ define inreg <4 x i32> @bitcast_v2i64_to_v4i32_scalar(<2 x i64> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB7_3
; SI-NEXT: .LBB7_2: ; %cmp.true
@@ -661,14 +637,12 @@ define inreg <4 x i32> @bitcast_v2i64_to_v4i32_scalar(<2 x i64> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v2i64_to_v4i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -682,14 +656,12 @@ define inreg <4 x i32> @bitcast_v2i64_to_v4i32_scalar(<2 x i64> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v2i64_to_v4i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
@@ -703,15 +675,13 @@ define inreg <4 x i32> @bitcast_v2i64_to_v4i32_scalar(<2 x i64> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v2i64_to_v4i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
@@ -725,8 +695,6 @@ define inreg <4 x i32> @bitcast_v2i64_to_v4i32_scalar(<2 x i64> inreg %a, i32 in
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -831,7 +799,7 @@ define inreg <2 x double> @bitcast_v4i32_to_v2f64_scalar(<4 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB9_3
; SI-NEXT: .LBB9_2: ; %cmp.true
@@ -845,14 +813,12 @@ define inreg <2 x double> @bitcast_v4i32_to_v2f64_scalar(<4 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v4i32_to_v2f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -866,14 +832,12 @@ define inreg <2 x double> @bitcast_v4i32_to_v2f64_scalar(<4 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v4i32_to_v2f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -887,15 +851,13 @@ define inreg <2 x double> @bitcast_v4i32_to_v2f64_scalar(<4 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v4i32_to_v2f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -909,8 +871,6 @@ define inreg <2 x double> @bitcast_v4i32_to_v2f64_scalar(<4 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1010,16 +970,14 @@ define inreg <4 x i32> @bitcast_v2f64_to_v4i32_scalar(<2 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -1030,16 +988,14 @@ define inreg <4 x i32> @bitcast_v2f64_to_v4i32_scalar(<2 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1050,16 +1006,14 @@ define inreg <4 x i32> @bitcast_v2f64_to_v4i32_scalar(<2 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1071,17 +1025,15 @@ define inreg <4 x i32> @bitcast_v2f64_to_v4i32_scalar(<2 x double> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -1215,7 +1167,7 @@ define inreg <8 x i16> @bitcast_v4i32_to_v8i16_scalar(<4 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB13_4
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s10, s19, 16
; SI-NEXT: s_lshr_b32 s11, s17, 16
@@ -1249,18 +1201,12 @@ define inreg <8 x i16> @bitcast_v4i32_to_v8i16_scalar(<4 x i32> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB13_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: s_branch .LBB13_2
;
; VI-LABEL: bitcast_v4i32_to_v8i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
@@ -1274,14 +1220,12 @@ define inreg <8 x i16> @bitcast_v4i32_to_v8i16_scalar(<4 x i32> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v4i32_to_v8i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
@@ -1295,15 +1239,13 @@ define inreg <8 x i16> @bitcast_v4i32_to_v8i16_scalar(<4 x i32> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v4i32_to_v8i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
@@ -1317,8 +1259,6 @@ define inreg <8 x i16> @bitcast_v4i32_to_v8i16_scalar(<4 x i32> inreg %a, i32 in
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1486,7 +1426,7 @@ define inreg <4 x i32> @bitcast_v8i16_to_v4i32_scalar(<8 x i16> inreg %a, i32 in
; SI-NEXT: s_lshr_b32 s12, s17, 16
; SI-NEXT: s_lshr_b32 s13, s16, 16
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s13, 16
@@ -1528,15 +1468,12 @@ define inreg <4 x i32> @bitcast_v8i16_to_v4i32_scalar(<8 x i16> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v8i16_to_v4i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB15_3
; VI-NEXT: .LBB15_2: ; %cmp.true
@@ -1566,16 +1503,14 @@ define inreg <4 x i32> @bitcast_v8i16_to_v4i32_scalar(<8 x i16> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v8i16_to_v4i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
@@ -1583,8 +1518,6 @@ define inreg <4 x i32> @bitcast_v8i16_to_v4i32_scalar(<8 x i16> inreg %a, i32 in
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1596,10 +1529,10 @@ define inreg <4 x i32> @bitcast_v8i16_to_v4i32_scalar(<8 x i16> inreg %a, i32 in
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
@@ -1607,8 +1540,6 @@ define inreg <4 x i32> @bitcast_v8i16_to_v4i32_scalar(<8 x i16> inreg %a, i32 in
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -1742,7 +1673,7 @@ define inreg <8 x half> @bitcast_v4i32_to_v8f16_scalar(<4 x i32> inreg %a, i32 i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s10, s19, 16
; SI-NEXT: s_lshr_b32 s11, s17, 16
@@ -1776,18 +1707,12 @@ define inreg <8 x half> @bitcast_v4i32_to_v8f16_scalar(<4 x i32> inreg %a, i32 i
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_v4i32_to_v8f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
@@ -1801,14 +1726,12 @@ define inreg <8 x half> @bitcast_v4i32_to_v8f16_scalar(<4 x i32> inreg %a, i32 i
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v4i32_to_v8f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
@@ -1822,15 +1745,13 @@ define inreg <8 x half> @bitcast_v4i32_to_v8f16_scalar(<4 x i32> inreg %a, i32 i
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v4i32_to_v8f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
@@ -1844,8 +1765,6 @@ define inreg <8 x half> @bitcast_v4i32_to_v8f16_scalar(<4 x i32> inreg %a, i32 i
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2030,7 +1949,7 @@ define inreg <4 x i32> @bitcast_v8f16_to_v4i32_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: s_lshr_b32 s12, s17, 16
; SI-NEXT: s_lshr_b32 s13, s16, 16
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s13, 16
@@ -2044,7 +1963,7 @@ define inreg <4 x i32> @bitcast_v8f16_to_v4i32_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: s_and_b32 s7, s19, 0xffff
; SI-NEXT: s_lshl_b32 s8, s10, 16
; SI-NEXT: s_or_b32 s7, s7, s8
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s13
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -2080,9 +1999,6 @@ define inreg <4 x i32> @bitcast_v8f16_to_v4i32_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: v_or_b32_e32 v3, v5, v3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -2093,9 +2009,9 @@ define inreg <4 x i32> @bitcast_v8f16_to_v4i32_scalar(<8 x half> inreg %a, i32 i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
+; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s19, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -2120,8 +2036,6 @@ define inreg <4 x i32> @bitcast_v8f16_to_v4i32_scalar(<8 x half> inreg %a, i32 i
; VI-NEXT: v_or_b32_e32 v0, v0, v4
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
-; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2132,9 +2046,9 @@ define inreg <4 x i32> @bitcast_v8f16_to_v4i32_scalar(<8 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v3, s19, v0 op_sel_hi:[1,0]
@@ -2143,8 +2057,6 @@ define inreg <4 x i32> @bitcast_v8f16_to_v4i32_scalar(<8 x half> inreg %a, i32 i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2156,10 +2068,10 @@ define inreg <4 x i32> @bitcast_v8f16_to_v4i32_scalar(<8 x half> inreg %a, i32 i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
@@ -2167,8 +2079,6 @@ define inreg <4 x i32> @bitcast_v8f16_to_v4i32_scalar(<8 x half> inreg %a, i32 i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -2323,7 +2233,7 @@ define inreg <8 x bfloat> @bitcast_v4i32_to_v8bf16_scalar(<4 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s7, s19, 0xffff0000
; SI-NEXT: s_lshl_b32 s6, s19, 16
@@ -2365,22 +2275,12 @@ define inreg <8 x bfloat> @bitcast_v4i32_to_v8bf16_scalar(<4 x i32> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s6
; SI-NEXT: v_lshr_b64 v[3:4], v[3:4], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_v4i32_to_v8bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
@@ -2394,14 +2294,12 @@ define inreg <8 x bfloat> @bitcast_v4i32_to_v8bf16_scalar(<4 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v4i32_to_v8bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
@@ -2415,15 +2313,13 @@ define inreg <8 x bfloat> @bitcast_v4i32_to_v8bf16_scalar(<4 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_v4i32_to_v8bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
@@ -2437,8 +2333,6 @@ define inreg <8 x bfloat> @bitcast_v4i32_to_v8bf16_scalar(<4 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_4:
-; GFX11-NEXT: s_branch .LBB21_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2901,7 +2795,7 @@ define inreg <4 x i32> @bitcast_v8bf16_to_v4i32_scalar(<8 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s7
; SI-NEXT: v_mul_f32_e64 v13, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v16
; SI-NEXT: v_lshr_b64 v[0:1], v[11:12], 16
@@ -2939,17 +2833,14 @@ define inreg <4 x i32> @bitcast_v8bf16_to_v4i32_scalar(<8 x bfloat> inreg %a, i3
; SI-NEXT: v_lshr_b64 v[3:4], v[3:4], 16
; SI-NEXT: .LBB23_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v8bf16_to_v4i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
+; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s19, 16
; VI-NEXT: v_mov_b32_e32 v7, 0x40c00000
@@ -3028,8 +2919,6 @@ define inreg <4 x i32> @bitcast_v8bf16_to_v4i32_scalar(<8 x bfloat> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v3, v4
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
-; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -3040,9 +2929,9 @@ define inreg <4 x i32> @bitcast_v8bf16_to_v4i32_scalar(<8 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
+; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s19
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -3124,8 +3013,6 @@ define inreg <4 x i32> @bitcast_v8bf16_to_v4i32_scalar(<8 x bfloat> inreg %a, i3
; GFX9-NEXT: v_lshl_or_b32 v0, v5, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
-; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3137,10 +3024,10 @@ define inreg <4 x i32> @bitcast_v8bf16_to_v4i32_scalar(<8 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB23_3
; GFX11-TRUE16-NEXT: .LBB23_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s3
; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
@@ -3224,8 +3111,6 @@ define inreg <4 x i32> @bitcast_v8bf16_to_v4i32_scalar(<8 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v5.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB23_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB23_2
-; GFX11-TRUE16-NEXT: .LBB23_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -3235,10 +3120,10 @@ define inreg <4 x i32> @bitcast_v8bf16_to_v4i32_scalar(<8 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB23_3
; GFX11-FAKE16-NEXT: .LBB23_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s3, 16
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s3
@@ -3327,8 +3212,6 @@ define inreg <4 x i32> @bitcast_v8bf16_to_v4i32_scalar(<8 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v7, 16, v8
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB23_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB23_2
-; GFX11-FAKE16-NEXT: .LBB23_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
@@ -3680,7 +3563,7 @@ define inreg <16 x i8> @bitcast_v4i32_to_v16i8_scalar(<4 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB25_4
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s22, s19, 24
; SI-NEXT: s_lshr_b32 s23, s19, 16
@@ -3730,26 +3613,12 @@ define inreg <16 x i8> @bitcast_v4i32_to_v16i8_scalar(<4 x i32> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v14, s23
; SI-NEXT: v_mov_b32_e32 v15, s22
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB25_4:
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr23
-; SI-NEXT: ; implicit-def: $sgpr22
-; SI-NEXT: s_branch .LBB25_2
;
; VI-LABEL: bitcast_v4i32_to_v16i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB25_4
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s10, s19, 24
; VI-NEXT: s_lshr_b32 s11, s19, 16
@@ -3799,26 +3668,12 @@ define inreg <16 x i8> @bitcast_v4i32_to_v16i8_scalar(<4 x i32> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v14, s11
; VI-NEXT: v_mov_b32_e32 v15, s10
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_4:
-; VI-NEXT: ; implicit-def: $sgpr23
-; VI-NEXT: ; implicit-def: $sgpr22
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr21
-; VI-NEXT: ; implicit-def: $sgpr20
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB25_2
;
; GFX9-LABEL: bitcast_v4i32_to_v16i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s10, s19, 24
; GFX9-NEXT: s_lshr_b32 s11, s19, 16
@@ -3868,27 +3723,13 @@ define inreg <16 x i8> @bitcast_v4i32_to_v16i8_scalar(<4 x i32> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v14, s11
; GFX9-NEXT: v_mov_b32_e32 v15, s10
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_4:
-; GFX9-NEXT: ; implicit-def: $sgpr23
-; GFX9-NEXT: ; implicit-def: $sgpr22
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr21
-; GFX9-NEXT: ; implicit-def: $sgpr20
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB25_2
;
; GFX11-LABEL: bitcast_v4i32_to_v16i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s18, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s8, s3, 24
; GFX11-NEXT: s_lshr_b32 s9, s3, 16
@@ -3932,20 +3773,6 @@ define inreg <16 x i8> @bitcast_v4i32_to_v16i8_scalar(<4 x i32> inreg %a, i32 in
; GFX11-NEXT: v_dual_mov_b32 v12, s3 :: v_dual_mov_b32 v13, s10
; GFX11-NEXT: v_dual_mov_b32 v14, s9 :: v_dual_mov_b32 v15, s8
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB25_4:
-; GFX11-NEXT: ; implicit-def: $sgpr17
-; GFX11-NEXT: ; implicit-def: $sgpr16
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: s_branch .LBB25_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4496,7 +4323,7 @@ define inreg <4 x i32> @bitcast_v16i8_to_v4i32_scalar(<16 x i8> inreg %a, i32 in
; SI-NEXT: v_readfirstlane_b32 s10, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s11, v0
-; SI-NEXT: s_cbranch_scc0 .LBB27_4
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -4594,9 +4421,6 @@ define inreg <4 x i32> @bitcast_v16i8_to_v4i32_scalar(<16 x i8> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; SI-NEXT: s_branch .LBB27_2
;
; VI-LABEL: bitcast_v16i8_to_v4i32_scalar:
; VI: ; %bb.0:
@@ -4605,7 +4429,7 @@ define inreg <4 x i32> @bitcast_v16i8_to_v4i32_scalar(<16 x i8> inreg %a, i32 in
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v3, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -4677,9 +4501,6 @@ define inreg <4 x i32> @bitcast_v16i8_to_v4i32_scalar(<16 x i8> inreg %a, i32 in
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v3
; VI-NEXT: .LBB27_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v16i8_to_v4i32_scalar:
; GFX9: ; %bb.0:
@@ -4688,7 +4509,7 @@ define inreg <4 x i32> @bitcast_v16i8_to_v4i32_scalar(<16 x i8> inreg %a, i32 in
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v3, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -4757,16 +4578,13 @@ define inreg <4 x i32> @bitcast_v16i8_to_v4i32_scalar(<16 x i8> inreg %a, i32 in
; GFX9-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB27_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX9-NEXT: s_branch .LBB27_2
;
; GFX11-LABEL: bitcast_v16i8_to_v4i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -4831,9 +4649,6 @@ define inreg <4 x i32> @bitcast_v16i8_to_v4i32_scalar(<16 x i8> inreg %a, i32 in
; GFX11-NEXT: v_or_b32_e32 v3, v7, v8
; GFX11-NEXT: .LBB27_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB27_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX11-NEXT: s_branch .LBB27_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4936,9 +4751,9 @@ define inreg <2 x i64> @bitcast_v4f32_to_v2i64_scalar(<4 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB29_4
+; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v3, s19, 1.0
; SI-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -4946,8 +4761,6 @@ define inreg <2 x i64> @bitcast_v4f32_to_v2i64_scalar(<4 x float> inreg %a, i32
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB29_3:
-; SI-NEXT: s_branch .LBB29_2
-; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -4958,9 +4771,9 @@ define inreg <2 x i64> @bitcast_v4f32_to_v2i64_scalar(<4 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
+; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -4968,8 +4781,6 @@ define inreg <2 x i64> @bitcast_v4f32_to_v2i64_scalar(<4 x float> inreg %a, i32
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
-; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -4980,9 +4791,9 @@ define inreg <2 x i64> @bitcast_v4f32_to_v2i64_scalar(<4 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
+; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -4990,8 +4801,6 @@ define inreg <2 x i64> @bitcast_v4f32_to_v2i64_scalar(<4 x float> inreg %a, i32
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
-; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -5003,10 +4812,10 @@ define inreg <2 x i64> @bitcast_v4f32_to_v2i64_scalar(<4 x float> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-NEXT: .LBB29_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
@@ -5014,8 +4823,6 @@ define inreg <2 x i64> @bitcast_v4f32_to_v2i64_scalar(<4 x float> inreg %a, i32
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
-; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -5124,7 +4931,7 @@ define inreg <4 x float> @bitcast_v2i64_to_v4f32_scalar(<2 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB31_3
; SI-NEXT: .LBB31_2: ; %cmp.true
@@ -5138,14 +4945,12 @@ define inreg <4 x float> @bitcast_v2i64_to_v4f32_scalar(<2 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v2i64_to_v4f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB31_3
; VI-NEXT: .LBB31_2: ; %cmp.true
@@ -5159,14 +4964,12 @@ define inreg <4 x float> @bitcast_v2i64_to_v4f32_scalar(<2 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v2i64_to_v4f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
@@ -5180,15 +4983,13 @@ define inreg <4 x float> @bitcast_v2i64_to_v4f32_scalar(<2 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB31_4:
-; GFX9-NEXT: s_branch .LBB31_2
;
; GFX11-LABEL: bitcast_v2i64_to_v4f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
@@ -5202,8 +5003,6 @@ define inreg <4 x float> @bitcast_v2i64_to_v4f32_scalar(<2 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB31_4:
-; GFX11-NEXT: s_branch .LBB31_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5306,9 +5105,9 @@ define inreg <2 x double> @bitcast_v4f32_to_v2f64_scalar(<4 x float> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB33_4
+; SI-NEXT: s_cbranch_execnz .LBB33_3
; SI-NEXT: .LBB33_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v3, s19, 1.0
; SI-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -5316,8 +5115,6 @@ define inreg <2 x double> @bitcast_v4f32_to_v2f64_scalar(<4 x float> inreg %a, i
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB33_3:
-; SI-NEXT: s_branch .LBB33_2
-; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -5328,9 +5125,9 @@ define inreg <2 x double> @bitcast_v4f32_to_v2f64_scalar(<4 x float> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_4
+; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -5338,8 +5135,6 @@ define inreg <2 x double> @bitcast_v4f32_to_v2f64_scalar(<4 x float> inreg %a, i
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB33_3:
-; VI-NEXT: s_branch .LBB33_2
-; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -5350,9 +5145,9 @@ define inreg <2 x double> @bitcast_v4f32_to_v2f64_scalar(<4 x float> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
+; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -5360,8 +5155,6 @@ define inreg <2 x double> @bitcast_v4f32_to_v2f64_scalar(<4 x float> inreg %a, i
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: s_branch .LBB33_2
-; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -5373,10 +5166,10 @@ define inreg <2 x double> @bitcast_v4f32_to_v2f64_scalar(<4 x float> inreg %a, i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
@@ -5384,8 +5177,6 @@ define inreg <2 x double> @bitcast_v4f32_to_v2f64_scalar(<4 x float> inreg %a, i
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
-; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -5488,16 +5279,14 @@ define inreg <4 x float> @bitcast_v2f64_to_v4f32_scalar(<2 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB35_3
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB35_4
+; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB35_3:
-; SI-NEXT: s_branch .LBB35_2
-; SI-NEXT: .LBB35_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -5508,16 +5297,14 @@ define inreg <4 x float> @bitcast_v2f64_to_v4f32_scalar(<2 x double> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_4
+; VI-NEXT: s_cbranch_execnz .LBB35_3
; VI-NEXT: .LBB35_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB35_3:
-; VI-NEXT: s_branch .LBB35_2
-; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -5528,16 +5315,14 @@ define inreg <4 x float> @bitcast_v2f64_to_v4f32_scalar(<2 x double> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
+; GFX9-NEXT: s_cbranch_execnz .LBB35_3
; GFX9-NEXT: .LBB35_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
-; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -5549,17 +5334,15 @@ define inreg <4 x float> @bitcast_v2f64_to_v4f32_scalar(<2 x double> inreg %a, i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-NEXT: .LBB35_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
-; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -5691,13 +5474,13 @@ define inreg <8 x i16> @bitcast_v4f32_to_v8i16_scalar(<4 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB37_3
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s11, s19, 16
; SI-NEXT: s_lshr_b32 s10, s17, 16
; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB37_4
+; SI-NEXT: s_cbranch_execnz .LBB37_3
; SI-NEXT: .LBB37_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v3, s19, 1.0
; SI-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -5707,14 +5490,8 @@ define inreg <8 x i16> @bitcast_v4f32_to_v8i16_scalar(<4 x float> inreg %a, i32
; SI-NEXT: v_lshr_b64 v[5:6], v[0:1], 16
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v1
-; SI-NEXT: s_branch .LBB37_5
+; SI-NEXT: s_branch .LBB37_4
; SI-NEXT: .LBB37_3:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: s_branch .LBB37_2
-; SI-NEXT: .LBB37_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -5723,7 +5500,7 @@ define inreg <8 x i16> @bitcast_v4f32_to_v8i16_scalar(<4 x float> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s11
; SI-NEXT: v_mov_b32_e32 v4, s4
; SI-NEXT: v_mov_b32_e32 v5, s6
-; SI-NEXT: .LBB37_5: ; %end
+; SI-NEXT: .LBB37_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -5742,9 +5519,9 @@ define inreg <8 x i16> @bitcast_v4f32_to_v8i16_scalar(<4 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB37_3
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_4
+; VI-NEXT: s_cbranch_execnz .LBB37_3
; VI-NEXT: .LBB37_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -5752,8 +5529,6 @@ define inreg <8 x i16> @bitcast_v4f32_to_v8i16_scalar(<4 x float> inreg %a, i32
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB37_3:
-; VI-NEXT: s_branch .LBB37_2
-; VI-NEXT: .LBB37_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -5764,9 +5539,9 @@ define inreg <8 x i16> @bitcast_v4f32_to_v8i16_scalar(<4 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_4
+; GFX9-NEXT: s_cbranch_execnz .LBB37_3
; GFX9-NEXT: .LBB37_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -5774,8 +5549,6 @@ define inreg <8 x i16> @bitcast_v4f32_to_v8i16_scalar(<4 x float> inreg %a, i32
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB37_3:
-; GFX9-NEXT: s_branch .LBB37_2
-; GFX9-NEXT: .LBB37_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -5787,10 +5560,10 @@ define inreg <8 x i16> @bitcast_v4f32_to_v8i16_scalar(<4 x float> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
; GFX11-NEXT: .LBB37_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
@@ -5798,8 +5571,6 @@ define inreg <8 x i16> @bitcast_v4f32_to_v8i16_scalar(<4 x float> inreg %a, i32
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: s_branch .LBB37_2
-; GFX11-NEXT: .LBB37_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -5970,7 +5741,7 @@ define inreg <4 x float> @bitcast_v8i16_to_v4f32_scalar(<8 x i16> inreg %a, i32
; SI-NEXT: s_lshr_b32 s12, s17, 16
; SI-NEXT: s_lshr_b32 s13, s16, 16
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB39_4
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s13, 16
@@ -6012,15 +5783,12 @@ define inreg <4 x float> @bitcast_v8i16_to_v4f32_scalar(<8 x i16> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; SI-NEXT: s_branch .LBB39_2
;
; VI-LABEL: bitcast_v8i16_to_v4f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB39_3
; VI-NEXT: .LBB39_2: ; %cmp.true
@@ -6050,16 +5818,14 @@ define inreg <4 x float> @bitcast_v8i16_to_v4f32_scalar(<8 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v8i16_to_v4f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
+; GFX9-NEXT: s_cbranch_execnz .LBB39_3
; GFX9-NEXT: .LBB39_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
@@ -6067,8 +5833,6 @@ define inreg <4 x float> @bitcast_v8i16_to_v4f32_scalar(<8 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
-; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -6080,10 +5844,10 @@ define inreg <4 x float> @bitcast_v8i16_to_v4f32_scalar(<8 x i16> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_3
; GFX11-NEXT: .LBB39_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
@@ -6091,8 +5855,6 @@ define inreg <4 x float> @bitcast_v8i16_to_v4f32_scalar(<8 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
-; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -6224,13 +5986,13 @@ define inreg <8 x half> @bitcast_v4f32_to_v8f16_scalar(<4 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB41_3
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s11, s19, 16
; SI-NEXT: s_lshr_b32 s10, s17, 16
; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB41_4
+; SI-NEXT: s_cbranch_execnz .LBB41_3
; SI-NEXT: .LBB41_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v3, s19, 1.0
; SI-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -6240,14 +6002,8 @@ define inreg <8 x half> @bitcast_v4f32_to_v8f16_scalar(<4 x float> inreg %a, i32
; SI-NEXT: v_lshr_b64 v[5:6], v[0:1], 16
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v1
-; SI-NEXT: s_branch .LBB41_5
+; SI-NEXT: s_branch .LBB41_4
; SI-NEXT: .LBB41_3:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: s_branch .LBB41_2
-; SI-NEXT: .LBB41_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -6256,7 +6012,7 @@ define inreg <8 x half> @bitcast_v4f32_to_v8f16_scalar(<4 x float> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s11
; SI-NEXT: v_mov_b32_e32 v4, s4
; SI-NEXT: v_mov_b32_e32 v5, s6
-; SI-NEXT: .LBB41_5: ; %end
+; SI-NEXT: .LBB41_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -6275,9 +6031,9 @@ define inreg <8 x half> @bitcast_v4f32_to_v8f16_scalar(<4 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB41_3
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB41_4
+; VI-NEXT: s_cbranch_execnz .LBB41_3
; VI-NEXT: .LBB41_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -6285,8 +6041,6 @@ define inreg <8 x half> @bitcast_v4f32_to_v8f16_scalar(<4 x float> inreg %a, i32
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB41_3:
-; VI-NEXT: s_branch .LBB41_2
-; VI-NEXT: .LBB41_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -6297,9 +6051,9 @@ define inreg <8 x half> @bitcast_v4f32_to_v8f16_scalar(<4 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB41_4
+; GFX9-NEXT: s_cbranch_execnz .LBB41_3
; GFX9-NEXT: .LBB41_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -6307,8 +6061,6 @@ define inreg <8 x half> @bitcast_v4f32_to_v8f16_scalar(<4 x float> inreg %a, i32
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB41_3:
-; GFX9-NEXT: s_branch .LBB41_2
-; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -6320,10 +6072,10 @@ define inreg <8 x half> @bitcast_v4f32_to_v8f16_scalar(<4 x float> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
; GFX11-NEXT: .LBB41_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
@@ -6331,8 +6083,6 @@ define inreg <8 x half> @bitcast_v4f32_to_v8f16_scalar(<4 x float> inreg %a, i32
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB41_3:
-; GFX11-NEXT: s_branch .LBB41_2
-; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -6520,7 +6270,7 @@ define inreg <4 x float> @bitcast_v8f16_to_v4f32_scalar(<8 x half> inreg %a, i32
; SI-NEXT: s_lshr_b32 s12, s17, 16
; SI-NEXT: s_lshr_b32 s13, s16, 16
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB43_3
+; SI-NEXT: s_cbranch_scc0 .LBB43_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s13, 16
@@ -6534,7 +6284,7 @@ define inreg <4 x float> @bitcast_v8f16_to_v4f32_scalar(<8 x half> inreg %a, i32
; SI-NEXT: s_and_b32 s7, s19, 0xffff
; SI-NEXT: s_lshl_b32 s8, s10, 16
; SI-NEXT: s_or_b32 s7, s7, s8
-; SI-NEXT: s_cbranch_execnz .LBB43_4
+; SI-NEXT: s_cbranch_execnz .LBB43_3
; SI-NEXT: .LBB43_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s13
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -6570,9 +6320,6 @@ define inreg <4 x float> @bitcast_v8f16_to_v4f32_scalar(<8 x half> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v3, v5, v3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB43_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; SI-NEXT: s_branch .LBB43_2
-; SI-NEXT: .LBB43_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -6583,9 +6330,9 @@ define inreg <4 x float> @bitcast_v8f16_to_v4f32_scalar(<8 x half> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB43_3
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB43_4
+; VI-NEXT: s_cbranch_execnz .LBB43_3
; VI-NEXT: .LBB43_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s19, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -6610,8 +6357,6 @@ define inreg <4 x float> @bitcast_v8f16_to_v4f32_scalar(<8 x half> inreg %a, i32
; VI-NEXT: v_or_b32_e32 v0, v0, v4
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB43_3:
-; VI-NEXT: s_branch .LBB43_2
-; VI-NEXT: .LBB43_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -6622,9 +6367,9 @@ define inreg <4 x float> @bitcast_v8f16_to_v4f32_scalar(<8 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
+; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v3, s19, v0 op_sel_hi:[1,0]
@@ -6633,8 +6378,6 @@ define inreg <4 x float> @bitcast_v8f16_to_v4f32_scalar(<8 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
-; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -6646,10 +6389,10 @@ define inreg <4 x float> @bitcast_v8f16_to_v4f32_scalar(<8 x half> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-NEXT: .LBB43_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
@@ -6657,8 +6400,6 @@ define inreg <4 x float> @bitcast_v8f16_to_v4f32_scalar(<8 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
-; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -6811,7 +6552,7 @@ define inreg <8 x bfloat> @bitcast_v4f32_to_v8bf16_scalar(<4 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB45_3
+; SI-NEXT: s_cbranch_scc0 .LBB45_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s19, 0xffff0000
; SI-NEXT: s_lshl_b32 s7, s19, 16
@@ -6821,7 +6562,7 @@ define inreg <8 x bfloat> @bitcast_v4f32_to_v8bf16_scalar(<4 x float> inreg %a,
; SI-NEXT: s_lshl_b32 s11, s17, 16
; SI-NEXT: s_and_b32 s12, s16, 0xffff0000
; SI-NEXT: s_lshl_b32 s13, s16, 16
-; SI-NEXT: s_cbranch_execnz .LBB45_4
+; SI-NEXT: s_cbranch_execnz .LBB45_3
; SI-NEXT: .LBB45_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
@@ -6835,18 +6576,8 @@ define inreg <8 x bfloat> @bitcast_v4f32_to_v8bf16_scalar(<4 x float> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v0
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: s_branch .LBB45_5
+; SI-NEXT: s_branch .LBB45_4
; SI-NEXT: .LBB45_3:
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB45_2
-; SI-NEXT: .LBB45_4:
; SI-NEXT: v_mov_b32_e32 v0, s13
; SI-NEXT: v_mov_b32_e32 v1, s12
; SI-NEXT: v_mov_b32_e32 v2, s11
@@ -6855,7 +6586,7 @@ define inreg <8 x bfloat> @bitcast_v4f32_to_v8bf16_scalar(<4 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v6, s8
; SI-NEXT: v_mov_b32_e32 v4, s7
; SI-NEXT: v_mov_b32_e32 v5, s6
-; SI-NEXT: .LBB45_5: ; %end
+; SI-NEXT: .LBB45_4: ; %end
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
@@ -6878,9 +6609,9 @@ define inreg <8 x bfloat> @bitcast_v4f32_to_v8bf16_scalar(<4 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB45_3
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB45_4
+; VI-NEXT: s_cbranch_execnz .LBB45_3
; VI-NEXT: .LBB45_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -6888,8 +6619,6 @@ define inreg <8 x bfloat> @bitcast_v4f32_to_v8bf16_scalar(<4 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB45_3:
-; VI-NEXT: s_branch .LBB45_2
-; VI-NEXT: .LBB45_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -6900,9 +6629,9 @@ define inreg <8 x bfloat> @bitcast_v4f32_to_v8bf16_scalar(<4 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB45_4
+; GFX9-NEXT: s_cbranch_execnz .LBB45_3
; GFX9-NEXT: .LBB45_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -6910,8 +6639,6 @@ define inreg <8 x bfloat> @bitcast_v4f32_to_v8bf16_scalar(<4 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB45_3:
-; GFX9-NEXT: s_branch .LBB45_2
-; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -6923,10 +6650,10 @@ define inreg <8 x bfloat> @bitcast_v4f32_to_v8bf16_scalar(<4 x float> inreg %a,
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
; GFX11-NEXT: .LBB45_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
@@ -6934,8 +6661,6 @@ define inreg <8 x bfloat> @bitcast_v4f32_to_v8bf16_scalar(<4 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB45_3:
-; GFX11-NEXT: s_branch .LBB45_2
-; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -7401,7 +7126,7 @@ define inreg <4 x float> @bitcast_v8bf16_to_v4f32_scalar(<8 x bfloat> inreg %a,
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s7
; SI-NEXT: v_mul_f32_e64 v13, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB47_4
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v16
; SI-NEXT: v_lshr_b64 v[0:1], v[11:12], 16
@@ -7439,17 +7164,14 @@ define inreg <4 x float> @bitcast_v8bf16_to_v4f32_scalar(<8 x bfloat> inreg %a,
; SI-NEXT: v_lshr_b64 v[3:4], v[3:4], 16
; SI-NEXT: .LBB47_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB47_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; SI-NEXT: s_branch .LBB47_2
;
; VI-LABEL: bitcast_v8bf16_to_v4f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
+; VI-NEXT: s_cbranch_execnz .LBB47_3
; VI-NEXT: .LBB47_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s19, 16
; VI-NEXT: v_mov_b32_e32 v7, 0x40c00000
@@ -7528,8 +7250,6 @@ define inreg <4 x float> @bitcast_v8bf16_to_v4f32_scalar(<8 x bfloat> inreg %a,
; VI-NEXT: v_mov_b32_e32 v3, v4
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
-; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -7540,9 +7260,9 @@ define inreg <4 x float> @bitcast_v8bf16_to_v4f32_scalar(<8 x bfloat> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
+; GFX9-NEXT: s_cbranch_execnz .LBB47_3
; GFX9-NEXT: .LBB47_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s19
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -7624,8 +7344,6 @@ define inreg <4 x float> @bitcast_v8bf16_to_v4f32_scalar(<8 x bfloat> inreg %a,
; GFX9-NEXT: v_lshl_or_b32 v0, v5, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
-; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -7637,10 +7355,10 @@ define inreg <4 x float> @bitcast_v8bf16_to_v4f32_scalar(<8 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s3
; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
@@ -7724,8 +7442,6 @@ define inreg <4 x float> @bitcast_v8bf16_to_v4f32_scalar(<8 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v5.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB47_2
-; GFX11-TRUE16-NEXT: .LBB47_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -7735,10 +7451,10 @@ define inreg <4 x float> @bitcast_v8bf16_to_v4f32_scalar(<8 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-FAKE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s3, 16
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s3
@@ -7827,8 +7543,6 @@ define inreg <4 x float> @bitcast_v8bf16_to_v4f32_scalar(<8 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v7, 16, v8
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB47_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB47_2
-; GFX11-FAKE16-NEXT: .LBB47_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
@@ -8176,7 +7890,7 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB49_3
+; SI-NEXT: s_cbranch_scc0 .LBB49_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s24, s19, 24
; SI-NEXT: s_lshr_b32 s26, s19, 16
@@ -8190,7 +7904,7 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16
; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8
-; SI-NEXT: s_cbranch_execnz .LBB49_4
+; SI-NEXT: s_cbranch_execnz .LBB49_3
; SI-NEXT: .LBB49_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v21, s17, 1.0
; SI-NEXT: v_add_f32_e64 v20, s16, 1.0
@@ -8208,22 +7922,8 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v21
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v21
; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v21
-; SI-NEXT: s_branch .LBB49_5
+; SI-NEXT: s_branch .LBB49_4
; SI-NEXT: .LBB49_3:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr23
-; SI-NEXT: ; implicit-def: $sgpr22
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: s_branch .LBB49_2
-; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v20, s16
; SI-NEXT: v_mov_b32_e32 v21, s17
; SI-NEXT: v_mov_b32_e32 v18, s18
@@ -8240,7 +7940,7 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v3, s4
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_mov_b32_e32 v1, s8
-; SI-NEXT: .LBB49_5: ; %end
+; SI-NEXT: .LBB49_4: ; %end
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: v_mov_b32_e32 v0, v20
; SI-NEXT: v_mov_b32_e32 v4, v21
@@ -8253,7 +7953,7 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB49_3
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s10, s19, 24
; VI-NEXT: s_lshr_b32 s11, s19, 16
@@ -8267,7 +7967,7 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; VI-NEXT: s_lshr_b32 s23, s16, 8
; VI-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB49_4
+; VI-NEXT: s_cbranch_execnz .LBB49_3
; VI-NEXT: .LBB49_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v19, s17, 1.0
; VI-NEXT: v_add_f32_e64 v18, s16, 1.0
@@ -8285,22 +7985,8 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; VI-NEXT: v_lshrrev_b32_e32 v5, 8, v19
; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v18
; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v18
-; VI-NEXT: s_branch .LBB49_5
+; VI-NEXT: s_branch .LBB49_4
; VI-NEXT: .LBB49_3:
-; VI-NEXT: ; implicit-def: $sgpr23
-; VI-NEXT: ; implicit-def: $sgpr21
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr22
-; VI-NEXT: ; implicit-def: $sgpr20
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB49_2
-; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v18, s16
; VI-NEXT: v_mov_b32_e32 v19, s17
; VI-NEXT: v_mov_b32_e32 v16, s18
@@ -8317,7 +8003,7 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v15, s10
; VI-NEXT: v_mov_b32_e32 v11, s6
; VI-NEXT: v_mov_b32_e32 v3, s4
-; VI-NEXT: .LBB49_5: ; %end
+; VI-NEXT: .LBB49_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, v18
; VI-NEXT: v_mov_b32_e32 v4, v19
; VI-NEXT: v_mov_b32_e32 v8, v16
@@ -8328,7 +8014,7 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s10, s19, 24
; GFX9-NEXT: s_lshr_b32 s11, s19, 16
@@ -8342,7 +8028,7 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; GFX9-NEXT: s_lshr_b32 s23, s16, 8
; GFX9-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
+; GFX9-NEXT: s_cbranch_execnz .LBB49_3
; GFX9-NEXT: .LBB49_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v19, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v18, s16, 1.0
@@ -8360,22 +8046,8 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v19
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v18
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v18
-; GFX9-NEXT: s_branch .LBB49_5
+; GFX9-NEXT: s_branch .LBB49_4
; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: ; implicit-def: $sgpr23
-; GFX9-NEXT: ; implicit-def: $sgpr21
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr22
-; GFX9-NEXT: ; implicit-def: $sgpr20
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB49_2
-; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v18, s16
; GFX9-NEXT: v_mov_b32_e32 v19, s17
; GFX9-NEXT: v_mov_b32_e32 v16, s18
@@ -8392,7 +8064,7 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v15, s10
; GFX9-NEXT: v_mov_b32_e32 v11, s6
; GFX9-NEXT: v_mov_b32_e32 v3, s4
-; GFX9-NEXT: .LBB49_5: ; %end
+; GFX9-NEXT: .LBB49_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, v18
; GFX9-NEXT: v_mov_b32_e32 v4, v19
; GFX9-NEXT: v_mov_b32_e32 v8, v16
@@ -8404,7 +8076,7 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s9, s3, 24
; GFX11-NEXT: s_lshr_b32 s10, s3, 16
@@ -8419,7 +8091,7 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v19, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v17, s3, 1.0
@@ -8439,22 +8111,8 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v19
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v18
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v18
-; GFX11-NEXT: s_branch .LBB49_5
+; GFX11-NEXT: s_branch .LBB49_4
; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: ; implicit-def: $sgpr18
-; GFX11-NEXT: ; implicit-def: $sgpr16
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr17
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: s_branch .LBB49_2
-; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_dual_mov_b32 v18, s0 :: v_dual_mov_b32 v19, s1
; GFX11-NEXT: v_dual_mov_b32 v16, s2 :: v_dual_mov_b32 v17, s3
; GFX11-NEXT: v_dual_mov_b32 v1, s18 :: v_dual_mov_b32 v2, s16
@@ -8465,7 +8123,7 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; GFX11-NEXT: v_mov_b32_e32 v15, s9
; GFX11-NEXT: v_mov_b32_e32 v11, s6
; GFX11-NEXT: v_mov_b32_e32 v3, s4
-; GFX11-NEXT: .LBB49_5: ; %end
+; GFX11-NEXT: .LBB49_4: ; %end
; GFX11-NEXT: v_mov_b32_e32 v0, v18
; GFX11-NEXT: v_mov_b32_e32 v4, v19
; GFX11-NEXT: v_mov_b32_e32 v8, v16
@@ -9021,7 +8679,7 @@ define inreg <4 x float> @bitcast_v16i8_to_v4f32_scalar(<16 x i8> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s10, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s11, v0
-; SI-NEXT: s_cbranch_scc0 .LBB51_4
+; SI-NEXT: s_cbranch_scc0 .LBB51_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -9119,9 +8777,6 @@ define inreg <4 x float> @bitcast_v16i8_to_v4f32_scalar(<16 x i8> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB51_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; SI-NEXT: s_branch .LBB51_2
;
; VI-LABEL: bitcast_v16i8_to_v4f32_scalar:
; VI: ; %bb.0:
@@ -9130,7 +8785,7 @@ define inreg <4 x float> @bitcast_v16i8_to_v4f32_scalar(<16 x i8> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB51_4
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v3, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -9202,9 +8857,6 @@ define inreg <4 x float> @bitcast_v16i8_to_v4f32_scalar(<16 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v3
; VI-NEXT: .LBB51_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; VI-NEXT: s_branch .LBB51_2
;
; GFX9-LABEL: bitcast_v16i8_to_v4f32_scalar:
; GFX9: ; %bb.0:
@@ -9213,7 +8865,7 @@ define inreg <4 x float> @bitcast_v16i8_to_v4f32_scalar(<16 x i8> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v3, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -9282,16 +8934,13 @@ define inreg <4 x float> @bitcast_v16i8_to_v4f32_scalar(<16 x i8> inreg %a, i32
; GFX9-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB51_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB51_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX9-NEXT: s_branch .LBB51_2
;
; GFX11-LABEL: bitcast_v16i8_to_v4f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -9356,9 +9005,6 @@ define inreg <4 x float> @bitcast_v16i8_to_v4f32_scalar(<16 x i8> inreg %a, i32
; GFX11-NEXT: v_or_b32_e32 v3, v7, v8
; GFX11-NEXT: .LBB51_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB51_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX11-NEXT: s_branch .LBB51_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9464,7 +9110,7 @@ define inreg <2 x double> @bitcast_v2i64_to_v2f64_scalar(<2 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB53_4
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB53_3
; SI-NEXT: .LBB53_2: ; %cmp.true
@@ -9478,14 +9124,12 @@ define inreg <2 x double> @bitcast_v2i64_to_v2f64_scalar(<2 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB53_4:
-; SI-NEXT: s_branch .LBB53_2
;
; VI-LABEL: bitcast_v2i64_to_v2f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB53_4
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB53_3
; VI-NEXT: .LBB53_2: ; %cmp.true
@@ -9499,14 +9143,12 @@ define inreg <2 x double> @bitcast_v2i64_to_v2f64_scalar(<2 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_4:
-; VI-NEXT: s_branch .LBB53_2
;
; GFX9-LABEL: bitcast_v2i64_to_v2f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB53_3
; GFX9-NEXT: .LBB53_2: ; %cmp.true
@@ -9520,15 +9162,13 @@ define inreg <2 x double> @bitcast_v2i64_to_v2f64_scalar(<2 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB53_4:
-; GFX9-NEXT: s_branch .LBB53_2
;
; GFX11-LABEL: bitcast_v2i64_to_v2f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
@@ -9541,8 +9181,6 @@ define inreg <2 x double> @bitcast_v2i64_to_v2f64_scalar(<2 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9642,16 +9280,14 @@ define inreg <2 x i64> @bitcast_v2f64_to_v2i64_scalar(<2 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB55_4
+; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB55_3:
-; SI-NEXT: s_branch .LBB55_2
-; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -9662,16 +9298,14 @@ define inreg <2 x i64> @bitcast_v2f64_to_v2i64_scalar(<2 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_4
+; VI-NEXT: s_cbranch_execnz .LBB55_3
; VI-NEXT: .LBB55_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB55_3:
-; VI-NEXT: s_branch .LBB55_2
-; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -9682,16 +9316,14 @@ define inreg <2 x i64> @bitcast_v2f64_to_v2i64_scalar(<2 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
+; GFX9-NEXT: s_cbranch_execnz .LBB55_3
; GFX9-NEXT: .LBB55_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
-; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -9703,17 +9335,15 @@ define inreg <2 x i64> @bitcast_v2f64_to_v2i64_scalar(<2 x double> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-NEXT: .LBB55_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
-; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -9848,7 +9478,7 @@ define inreg <8 x i16> @bitcast_v2i64_to_v8i16_scalar(<2 x i64> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB57_4
+; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s10, s19, 16
; SI-NEXT: s_lshr_b32 s11, s17, 16
@@ -9882,18 +9512,12 @@ define inreg <8 x i16> @bitcast_v2i64_to_v8i16_scalar(<2 x i64> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB57_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: s_branch .LBB57_2
;
; VI-LABEL: bitcast_v2i64_to_v8i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB57_3
; VI-NEXT: .LBB57_2: ; %cmp.true
@@ -9907,14 +9531,12 @@ define inreg <8 x i16> @bitcast_v2i64_to_v8i16_scalar(<2 x i64> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v2i64_to_v8i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB57_3
; GFX9-NEXT: .LBB57_2: ; %cmp.true
@@ -9928,15 +9550,13 @@ define inreg <8 x i16> @bitcast_v2i64_to_v8i16_scalar(<2 x i64> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB57_4:
-; GFX9-NEXT: s_branch .LBB57_2
;
; GFX11-LABEL: bitcast_v2i64_to_v8i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB57_3
@@ -9950,8 +9570,6 @@ define inreg <8 x i16> @bitcast_v2i64_to_v8i16_scalar(<2 x i64> inreg %a, i32 in
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB57_4:
-; GFX11-NEXT: s_branch .LBB57_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10119,7 +9737,7 @@ define inreg <2 x i64> @bitcast_v8i16_to_v2i64_scalar(<8 x i16> inreg %a, i32 in
; SI-NEXT: s_lshr_b32 s12, s17, 16
; SI-NEXT: s_lshr_b32 s13, s16, 16
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB59_4
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s13, 16
@@ -10161,15 +9779,12 @@ define inreg <2 x i64> @bitcast_v8i16_to_v2i64_scalar(<8 x i16> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB59_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; SI-NEXT: s_branch .LBB59_2
;
; VI-LABEL: bitcast_v8i16_to_v2i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_4
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB59_3
; VI-NEXT: .LBB59_2: ; %cmp.true
@@ -10199,16 +9814,14 @@ define inreg <2 x i64> @bitcast_v8i16_to_v2i64_scalar(<8 x i16> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB59_4:
-; VI-NEXT: s_branch .LBB59_2
;
; GFX9-LABEL: bitcast_v8i16_to_v2i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
+; GFX9-NEXT: s_cbranch_execnz .LBB59_3
; GFX9-NEXT: .LBB59_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
@@ -10216,8 +9829,6 @@ define inreg <2 x i64> @bitcast_v8i16_to_v2i64_scalar(<8 x i16> inreg %a, i32 in
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
-; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -10229,10 +9840,10 @@ define inreg <2 x i64> @bitcast_v8i16_to_v2i64_scalar(<8 x i16> inreg %a, i32 in
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-NEXT: .LBB59_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
@@ -10240,8 +9851,6 @@ define inreg <2 x i64> @bitcast_v8i16_to_v2i64_scalar(<8 x i16> inreg %a, i32 in
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
-; GFX11-NEXT: .LBB59_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -10376,7 +9985,7 @@ define inreg <8 x half> @bitcast_v2i64_to_v8f16_scalar(<2 x i64> inreg %a, i32 i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB61_4
+; SI-NEXT: s_cbranch_scc0 .LBB61_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s10, s19, 16
; SI-NEXT: s_lshr_b32 s11, s17, 16
@@ -10410,18 +10019,12 @@ define inreg <8 x half> @bitcast_v2i64_to_v8f16_scalar(<2 x i64> inreg %a, i32 i
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB61_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: s_branch .LBB61_2
;
; VI-LABEL: bitcast_v2i64_to_v8f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB61_4
+; VI-NEXT: s_cbranch_scc0 .LBB61_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB61_3
; VI-NEXT: .LBB61_2: ; %cmp.true
@@ -10435,14 +10038,12 @@ define inreg <8 x half> @bitcast_v2i64_to_v8f16_scalar(<2 x i64> inreg %a, i32 i
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB61_4:
-; VI-NEXT: s_branch .LBB61_2
;
; GFX9-LABEL: bitcast_v2i64_to_v8f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB61_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB61_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB61_3
; GFX9-NEXT: .LBB61_2: ; %cmp.true
@@ -10456,15 +10057,13 @@ define inreg <8 x half> @bitcast_v2i64_to_v8f16_scalar(<2 x i64> inreg %a, i32 i
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB61_4:
-; GFX9-NEXT: s_branch .LBB61_2
;
; GFX11-LABEL: bitcast_v2i64_to_v8f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB61_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB61_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB61_3
@@ -10478,8 +10077,6 @@ define inreg <8 x half> @bitcast_v2i64_to_v8f16_scalar(<2 x i64> inreg %a, i32 i
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB61_4:
-; GFX11-NEXT: s_branch .LBB61_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10664,7 +10261,7 @@ define inreg <2 x i64> @bitcast_v8f16_to_v2i64_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: s_lshr_b32 s12, s17, 16
; SI-NEXT: s_lshr_b32 s13, s16, 16
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB63_3
+; SI-NEXT: s_cbranch_scc0 .LBB63_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s13, 16
@@ -10678,7 +10275,7 @@ define inreg <2 x i64> @bitcast_v8f16_to_v2i64_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: s_and_b32 s7, s19, 0xffff
; SI-NEXT: s_lshl_b32 s8, s10, 16
; SI-NEXT: s_or_b32 s7, s7, s8
-; SI-NEXT: s_cbranch_execnz .LBB63_4
+; SI-NEXT: s_cbranch_execnz .LBB63_3
; SI-NEXT: .LBB63_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s13
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -10714,9 +10311,6 @@ define inreg <2 x i64> @bitcast_v8f16_to_v2i64_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: v_or_b32_e32 v3, v5, v3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB63_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; SI-NEXT: s_branch .LBB63_2
-; SI-NEXT: .LBB63_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -10727,9 +10321,9 @@ define inreg <2 x i64> @bitcast_v8f16_to_v2i64_scalar(<8 x half> inreg %a, i32 i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB63_3
+; VI-NEXT: s_cbranch_scc0 .LBB63_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB63_4
+; VI-NEXT: s_cbranch_execnz .LBB63_3
; VI-NEXT: .LBB63_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s19, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -10754,8 +10348,6 @@ define inreg <2 x i64> @bitcast_v8f16_to_v2i64_scalar(<8 x half> inreg %a, i32 i
; VI-NEXT: v_or_b32_e32 v0, v0, v4
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB63_3:
-; VI-NEXT: s_branch .LBB63_2
-; VI-NEXT: .LBB63_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -10766,9 +10358,9 @@ define inreg <2 x i64> @bitcast_v8f16_to_v2i64_scalar(<8 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB63_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB63_4
+; GFX9-NEXT: s_cbranch_execnz .LBB63_3
; GFX9-NEXT: .LBB63_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v3, s19, v0 op_sel_hi:[1,0]
@@ -10777,8 +10369,6 @@ define inreg <2 x i64> @bitcast_v8f16_to_v2i64_scalar(<8 x half> inreg %a, i32 i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB63_3:
-; GFX9-NEXT: s_branch .LBB63_2
-; GFX9-NEXT: .LBB63_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -10790,10 +10380,10 @@ define inreg <2 x i64> @bitcast_v8f16_to_v2i64_scalar(<8 x half> inreg %a, i32 i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB63_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB63_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB63_3
; GFX11-NEXT: .LBB63_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
@@ -10801,8 +10391,6 @@ define inreg <2 x i64> @bitcast_v8f16_to_v2i64_scalar(<8 x half> inreg %a, i32 i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB63_3:
-; GFX11-NEXT: s_branch .LBB63_2
-; GFX11-NEXT: .LBB63_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -10958,7 +10546,7 @@ define inreg <8 x bfloat> @bitcast_v2i64_to_v8bf16_scalar(<2 x i64> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB65_4
+; SI-NEXT: s_cbranch_scc0 .LBB65_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s7, s19, 0xffff0000
; SI-NEXT: s_lshl_b32 s6, s19, 16
@@ -11000,22 +10588,12 @@ define inreg <8 x bfloat> @bitcast_v2i64_to_v8bf16_scalar(<2 x i64> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s6
; SI-NEXT: v_lshr_b64 v[3:4], v[3:4], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB65_4:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB65_2
;
; VI-LABEL: bitcast_v2i64_to_v8bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB65_4
+; VI-NEXT: s_cbranch_scc0 .LBB65_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB65_3
; VI-NEXT: .LBB65_2: ; %cmp.true
@@ -11029,14 +10607,12 @@ define inreg <8 x bfloat> @bitcast_v2i64_to_v8bf16_scalar(<2 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB65_4:
-; VI-NEXT: s_branch .LBB65_2
;
; GFX9-LABEL: bitcast_v2i64_to_v8bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB65_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB65_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB65_3
; GFX9-NEXT: .LBB65_2: ; %cmp.true
@@ -11050,15 +10626,13 @@ define inreg <8 x bfloat> @bitcast_v2i64_to_v8bf16_scalar(<2 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB65_4:
-; GFX9-NEXT: s_branch .LBB65_2
;
; GFX11-LABEL: bitcast_v2i64_to_v8bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB65_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB65_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB65_3
@@ -11072,8 +10646,6 @@ define inreg <8 x bfloat> @bitcast_v2i64_to_v8bf16_scalar(<2 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB65_4:
-; GFX11-NEXT: s_branch .LBB65_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11536,7 +11108,7 @@ define inreg <2 x i64> @bitcast_v8bf16_to_v2i64_scalar(<8 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s7
; SI-NEXT: v_mul_f32_e64 v13, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB67_4
+; SI-NEXT: s_cbranch_scc0 .LBB67_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v16
; SI-NEXT: v_lshr_b64 v[0:1], v[11:12], 16
@@ -11574,17 +11146,14 @@ define inreg <2 x i64> @bitcast_v8bf16_to_v2i64_scalar(<8 x bfloat> inreg %a, i3
; SI-NEXT: v_lshr_b64 v[3:4], v[3:4], 16
; SI-NEXT: .LBB67_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB67_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; SI-NEXT: s_branch .LBB67_2
;
; VI-LABEL: bitcast_v8bf16_to_v2i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB67_3
+; VI-NEXT: s_cbranch_scc0 .LBB67_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB67_4
+; VI-NEXT: s_cbranch_execnz .LBB67_3
; VI-NEXT: .LBB67_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s19, 16
; VI-NEXT: v_mov_b32_e32 v7, 0x40c00000
@@ -11663,8 +11232,6 @@ define inreg <2 x i64> @bitcast_v8bf16_to_v2i64_scalar(<8 x bfloat> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v3, v4
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB67_3:
-; VI-NEXT: s_branch .LBB67_2
-; VI-NEXT: .LBB67_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -11675,9 +11242,9 @@ define inreg <2 x i64> @bitcast_v8bf16_to_v2i64_scalar(<8 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB67_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB67_4
+; GFX9-NEXT: s_cbranch_execnz .LBB67_3
; GFX9-NEXT: .LBB67_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s19
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -11759,8 +11326,6 @@ define inreg <2 x i64> @bitcast_v8bf16_to_v2i64_scalar(<8 x bfloat> inreg %a, i3
; GFX9-NEXT: v_lshl_or_b32 v0, v5, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB67_3:
-; GFX9-NEXT: s_branch .LBB67_2
-; GFX9-NEXT: .LBB67_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -11772,10 +11337,10 @@ define inreg <2 x i64> @bitcast_v8bf16_to_v2i64_scalar(<8 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB67_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB67_3
; GFX11-TRUE16-NEXT: .LBB67_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s3
; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
@@ -11859,8 +11424,6 @@ define inreg <2 x i64> @bitcast_v8bf16_to_v2i64_scalar(<8 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v5.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB67_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB67_2
-; GFX11-TRUE16-NEXT: .LBB67_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -11870,10 +11433,10 @@ define inreg <2 x i64> @bitcast_v8bf16_to_v2i64_scalar(<8 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB67_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB67_3
; GFX11-FAKE16-NEXT: .LBB67_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s3, 16
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s3
@@ -11962,8 +11525,6 @@ define inreg <2 x i64> @bitcast_v8bf16_to_v2i64_scalar(<8 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v7, 16, v8
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB67_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB67_2
-; GFX11-FAKE16-NEXT: .LBB67_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
@@ -12315,7 +11876,7 @@ define inreg <16 x i8> @bitcast_v2i64_to_v16i8_scalar(<2 x i64> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB69_4
+; SI-NEXT: s_cbranch_scc0 .LBB69_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s22, s19, 24
; SI-NEXT: s_lshr_b32 s23, s19, 16
@@ -12365,26 +11926,12 @@ define inreg <16 x i8> @bitcast_v2i64_to_v16i8_scalar(<2 x i64> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v14, s23
; SI-NEXT: v_mov_b32_e32 v15, s22
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB69_4:
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr23
-; SI-NEXT: ; implicit-def: $sgpr22
-; SI-NEXT: s_branch .LBB69_2
;
; VI-LABEL: bitcast_v2i64_to_v16i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB69_4
+; VI-NEXT: s_cbranch_scc0 .LBB69_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s10, s19, 24
; VI-NEXT: s_lshr_b32 s11, s19, 16
@@ -12434,26 +11981,12 @@ define inreg <16 x i8> @bitcast_v2i64_to_v16i8_scalar(<2 x i64> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v14, s11
; VI-NEXT: v_mov_b32_e32 v15, s10
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB69_4:
-; VI-NEXT: ; implicit-def: $sgpr23
-; VI-NEXT: ; implicit-def: $sgpr22
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr21
-; VI-NEXT: ; implicit-def: $sgpr20
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB69_2
;
; GFX9-LABEL: bitcast_v2i64_to_v16i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB69_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB69_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s10, s19, 24
; GFX9-NEXT: s_lshr_b32 s11, s19, 16
@@ -12503,27 +12036,13 @@ define inreg <16 x i8> @bitcast_v2i64_to_v16i8_scalar(<2 x i64> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v14, s11
; GFX9-NEXT: v_mov_b32_e32 v15, s10
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB69_4:
-; GFX9-NEXT: ; implicit-def: $sgpr23
-; GFX9-NEXT: ; implicit-def: $sgpr22
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr21
-; GFX9-NEXT: ; implicit-def: $sgpr20
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB69_2
;
; GFX11-LABEL: bitcast_v2i64_to_v16i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s18, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB69_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB69_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s8, s3, 24
; GFX11-NEXT: s_lshr_b32 s9, s3, 16
@@ -12567,20 +12086,6 @@ define inreg <16 x i8> @bitcast_v2i64_to_v16i8_scalar(<2 x i64> inreg %a, i32 in
; GFX11-NEXT: v_dual_mov_b32 v12, s3 :: v_dual_mov_b32 v13, s10
; GFX11-NEXT: v_dual_mov_b32 v14, s9 :: v_dual_mov_b32 v15, s8
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB69_4:
-; GFX11-NEXT: ; implicit-def: $sgpr17
-; GFX11-NEXT: ; implicit-def: $sgpr16
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: s_branch .LBB69_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13131,7 +12636,7 @@ define inreg <2 x i64> @bitcast_v16i8_to_v2i64_scalar(<16 x i8> inreg %a, i32 in
; SI-NEXT: v_readfirstlane_b32 s10, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s11, v0
-; SI-NEXT: s_cbranch_scc0 .LBB71_4
+; SI-NEXT: s_cbranch_scc0 .LBB71_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -13229,9 +12734,6 @@ define inreg <2 x i64> @bitcast_v16i8_to_v2i64_scalar(<16 x i8> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB71_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; SI-NEXT: s_branch .LBB71_2
;
; VI-LABEL: bitcast_v16i8_to_v2i64_scalar:
; VI: ; %bb.0:
@@ -13240,7 +12742,7 @@ define inreg <2 x i64> @bitcast_v16i8_to_v2i64_scalar(<16 x i8> inreg %a, i32 in
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB71_4
+; VI-NEXT: s_cbranch_scc0 .LBB71_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v3, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -13312,9 +12814,6 @@ define inreg <2 x i64> @bitcast_v16i8_to_v2i64_scalar(<16 x i8> inreg %a, i32 in
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v3
; VI-NEXT: .LBB71_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB71_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; VI-NEXT: s_branch .LBB71_2
;
; GFX9-LABEL: bitcast_v16i8_to_v2i64_scalar:
; GFX9: ; %bb.0:
@@ -13323,7 +12822,7 @@ define inreg <2 x i64> @bitcast_v16i8_to_v2i64_scalar(<16 x i8> inreg %a, i32 in
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB71_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB71_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v3, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -13392,16 +12891,13 @@ define inreg <2 x i64> @bitcast_v16i8_to_v2i64_scalar(<16 x i8> inreg %a, i32 in
; GFX9-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB71_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB71_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX9-NEXT: s_branch .LBB71_2
;
; GFX11-LABEL: bitcast_v16i8_to_v2i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB71_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB71_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -13466,9 +12962,6 @@ define inreg <2 x i64> @bitcast_v16i8_to_v2i64_scalar(<16 x i8> inreg %a, i32 in
; GFX11-NEXT: v_or_b32_e32 v3, v7, v8
; GFX11-NEXT: .LBB71_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB71_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX11-NEXT: s_branch .LBB71_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13594,13 +13087,13 @@ define inreg <8 x i16> @bitcast_v2f64_to_v8i16_scalar(<2 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB73_3
+; SI-NEXT: s_cbranch_scc0 .LBB73_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s11, s19, 16
; SI-NEXT: s_lshr_b32 s10, s17, 16
; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB73_4
+; SI-NEXT: s_cbranch_execnz .LBB73_3
; SI-NEXT: .LBB73_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
@@ -13608,14 +13101,8 @@ define inreg <8 x i16> @bitcast_v2f64_to_v8i16_scalar(<2 x double> inreg %a, i32
; SI-NEXT: v_lshr_b64 v[5:6], v[0:1], 16
; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v1
-; SI-NEXT: s_branch .LBB73_5
+; SI-NEXT: s_branch .LBB73_4
; SI-NEXT: .LBB73_3:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: s_branch .LBB73_2
-; SI-NEXT: .LBB73_4:
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -13624,7 +13111,7 @@ define inreg <8 x i16> @bitcast_v2f64_to_v8i16_scalar(<2 x double> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s10
; SI-NEXT: v_mov_b32_e32 v5, s6
; SI-NEXT: v_mov_b32_e32 v4, s4
-; SI-NEXT: .LBB73_5: ; %end
+; SI-NEXT: .LBB73_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -13643,16 +13130,14 @@ define inreg <8 x i16> @bitcast_v2f64_to_v8i16_scalar(<2 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB73_3
+; VI-NEXT: s_cbranch_scc0 .LBB73_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB73_4
+; VI-NEXT: s_cbranch_execnz .LBB73_3
; VI-NEXT: .LBB73_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB73_3:
-; VI-NEXT: s_branch .LBB73_2
-; VI-NEXT: .LBB73_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -13663,16 +13148,14 @@ define inreg <8 x i16> @bitcast_v2f64_to_v8i16_scalar(<2 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB73_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB73_4
+; GFX9-NEXT: s_cbranch_execnz .LBB73_3
; GFX9-NEXT: .LBB73_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB73_3:
-; GFX9-NEXT: s_branch .LBB73_2
-; GFX9-NEXT: .LBB73_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -13684,17 +13167,15 @@ define inreg <8 x i16> @bitcast_v2f64_to_v8i16_scalar(<2 x double> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB73_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB73_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB73_3
; GFX11-NEXT: .LBB73_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB73_3:
-; GFX11-NEXT: s_branch .LBB73_2
-; GFX11-NEXT: .LBB73_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -13865,7 +13346,7 @@ define inreg <2 x double> @bitcast_v8i16_to_v2f64_scalar(<8 x i16> inreg %a, i32
; SI-NEXT: s_lshr_b32 s12, s17, 16
; SI-NEXT: s_lshr_b32 s13, s16, 16
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB75_4
+; SI-NEXT: s_cbranch_scc0 .LBB75_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s13, 16
@@ -13907,15 +13388,12 @@ define inreg <2 x double> @bitcast_v8i16_to_v2f64_scalar(<8 x i16> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB75_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; SI-NEXT: s_branch .LBB75_2
;
; VI-LABEL: bitcast_v8i16_to_v2f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB75_4
+; VI-NEXT: s_cbranch_scc0 .LBB75_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB75_3
; VI-NEXT: .LBB75_2: ; %cmp.true
@@ -13945,16 +13423,14 @@ define inreg <2 x double> @bitcast_v8i16_to_v2f64_scalar(<8 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB75_4:
-; VI-NEXT: s_branch .LBB75_2
;
; GFX9-LABEL: bitcast_v8i16_to_v2f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB75_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB75_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB75_4
+; GFX9-NEXT: s_cbranch_execnz .LBB75_3
; GFX9-NEXT: .LBB75_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
@@ -13962,8 +13438,6 @@ define inreg <2 x double> @bitcast_v8i16_to_v2f64_scalar(<8 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB75_3:
-; GFX9-NEXT: s_branch .LBB75_2
-; GFX9-NEXT: .LBB75_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -13975,10 +13449,10 @@ define inreg <2 x double> @bitcast_v8i16_to_v2f64_scalar(<8 x i16> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB75_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB75_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB75_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB75_3
; GFX11-NEXT: .LBB75_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
@@ -13986,8 +13460,6 @@ define inreg <2 x double> @bitcast_v8i16_to_v2f64_scalar(<8 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB75_3:
-; GFX11-NEXT: s_branch .LBB75_2
-; GFX11-NEXT: .LBB75_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -14116,13 +13588,13 @@ define inreg <8 x half> @bitcast_v2f64_to_v8f16_scalar(<2 x double> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB77_3
+; SI-NEXT: s_cbranch_scc0 .LBB77_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s11, s19, 16
; SI-NEXT: s_lshr_b32 s10, s17, 16
; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB77_4
+; SI-NEXT: s_cbranch_execnz .LBB77_3
; SI-NEXT: .LBB77_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
@@ -14130,14 +13602,8 @@ define inreg <8 x half> @bitcast_v2f64_to_v8f16_scalar(<2 x double> inreg %a, i3
; SI-NEXT: v_lshr_b64 v[5:6], v[0:1], 16
; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v1
-; SI-NEXT: s_branch .LBB77_5
+; SI-NEXT: s_branch .LBB77_4
; SI-NEXT: .LBB77_3:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: s_branch .LBB77_2
-; SI-NEXT: .LBB77_4:
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -14146,7 +13612,7 @@ define inreg <8 x half> @bitcast_v2f64_to_v8f16_scalar(<2 x double> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v6, s10
; SI-NEXT: v_mov_b32_e32 v5, s6
; SI-NEXT: v_mov_b32_e32 v4, s4
-; SI-NEXT: .LBB77_5: ; %end
+; SI-NEXT: .LBB77_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -14165,16 +13631,14 @@ define inreg <8 x half> @bitcast_v2f64_to_v8f16_scalar(<2 x double> inreg %a, i3
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB77_3
+; VI-NEXT: s_cbranch_scc0 .LBB77_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB77_4
+; VI-NEXT: s_cbranch_execnz .LBB77_3
; VI-NEXT: .LBB77_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB77_3:
-; VI-NEXT: s_branch .LBB77_2
-; VI-NEXT: .LBB77_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -14185,16 +13649,14 @@ define inreg <8 x half> @bitcast_v2f64_to_v8f16_scalar(<2 x double> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB77_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB77_4
+; GFX9-NEXT: s_cbranch_execnz .LBB77_3
; GFX9-NEXT: .LBB77_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB77_3:
-; GFX9-NEXT: s_branch .LBB77_2
-; GFX9-NEXT: .LBB77_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -14206,17 +13668,15 @@ define inreg <8 x half> @bitcast_v2f64_to_v8f16_scalar(<2 x double> inreg %a, i3
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB77_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB77_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB77_3
; GFX11-NEXT: .LBB77_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB77_3:
-; GFX11-NEXT: s_branch .LBB77_2
-; GFX11-NEXT: .LBB77_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -14404,7 +13864,7 @@ define inreg <2 x double> @bitcast_v8f16_to_v2f64_scalar(<8 x half> inreg %a, i3
; SI-NEXT: s_lshr_b32 s12, s17, 16
; SI-NEXT: s_lshr_b32 s13, s16, 16
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB79_3
+; SI-NEXT: s_cbranch_scc0 .LBB79_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s13, 16
@@ -14418,7 +13878,7 @@ define inreg <2 x double> @bitcast_v8f16_to_v2f64_scalar(<8 x half> inreg %a, i3
; SI-NEXT: s_and_b32 s7, s19, 0xffff
; SI-NEXT: s_lshl_b32 s8, s10, 16
; SI-NEXT: s_or_b32 s7, s7, s8
-; SI-NEXT: s_cbranch_execnz .LBB79_4
+; SI-NEXT: s_cbranch_execnz .LBB79_3
; SI-NEXT: .LBB79_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s13
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -14454,9 +13914,6 @@ define inreg <2 x double> @bitcast_v8f16_to_v2f64_scalar(<8 x half> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v3, v5, v3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB79_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; SI-NEXT: s_branch .LBB79_2
-; SI-NEXT: .LBB79_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -14467,9 +13924,9 @@ define inreg <2 x double> @bitcast_v8f16_to_v2f64_scalar(<8 x half> inreg %a, i3
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB79_3
+; VI-NEXT: s_cbranch_scc0 .LBB79_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB79_4
+; VI-NEXT: s_cbranch_execnz .LBB79_3
; VI-NEXT: .LBB79_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s19, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -14494,8 +13951,6 @@ define inreg <2 x double> @bitcast_v8f16_to_v2f64_scalar(<8 x half> inreg %a, i3
; VI-NEXT: v_or_b32_e32 v0, v0, v4
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB79_3:
-; VI-NEXT: s_branch .LBB79_2
-; VI-NEXT: .LBB79_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -14506,9 +13961,9 @@ define inreg <2 x double> @bitcast_v8f16_to_v2f64_scalar(<8 x half> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB79_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB79_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB79_4
+; GFX9-NEXT: s_cbranch_execnz .LBB79_3
; GFX9-NEXT: .LBB79_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v3, s19, v0 op_sel_hi:[1,0]
@@ -14517,8 +13972,6 @@ define inreg <2 x double> @bitcast_v8f16_to_v2f64_scalar(<8 x half> inreg %a, i3
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB79_3:
-; GFX9-NEXT: s_branch .LBB79_2
-; GFX9-NEXT: .LBB79_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -14530,10 +13983,10 @@ define inreg <2 x double> @bitcast_v8f16_to_v2f64_scalar(<8 x half> inreg %a, i3
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB79_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB79_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB79_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB79_3
; GFX11-NEXT: .LBB79_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
@@ -14541,8 +13994,6 @@ define inreg <2 x double> @bitcast_v8f16_to_v2f64_scalar(<8 x half> inreg %a, i3
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB79_3:
-; GFX11-NEXT: s_branch .LBB79_2
-; GFX11-NEXT: .LBB79_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -14690,7 +14141,7 @@ define inreg <8 x bfloat> @bitcast_v2f64_to_v8bf16_scalar(<2 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB81_3
+; SI-NEXT: s_cbranch_scc0 .LBB81_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s13, s19, 0xffff0000
; SI-NEXT: s_lshl_b32 s12, s19, 16
@@ -14700,7 +14151,7 @@ define inreg <8 x bfloat> @bitcast_v2f64_to_v8bf16_scalar(<2 x double> inreg %a,
; SI-NEXT: s_lshl_b32 s8, s17, 16
; SI-NEXT: s_and_b32 s7, s16, 0xffff0000
; SI-NEXT: s_lshl_b32 s6, s16, 16
-; SI-NEXT: s_cbranch_execnz .LBB81_4
+; SI-NEXT: s_cbranch_execnz .LBB81_3
; SI-NEXT: .LBB81_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[8:9], s[16:17], 1.0
@@ -14712,18 +14163,8 @@ define inreg <8 x bfloat> @bitcast_v2f64_to_v8bf16_scalar(<2 x double> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v9
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v8
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v8
-; SI-NEXT: s_branch .LBB81_5
+; SI-NEXT: s_branch .LBB81_4
; SI-NEXT: .LBB81_3:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: s_branch .LBB81_2
-; SI-NEXT: .LBB81_4:
; SI-NEXT: v_mov_b32_e32 v5, s13
; SI-NEXT: v_mov_b32_e32 v4, s12
; SI-NEXT: v_mov_b32_e32 v6, s11
@@ -14732,7 +14173,7 @@ define inreg <8 x bfloat> @bitcast_v2f64_to_v8bf16_scalar(<2 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v2, s8
; SI-NEXT: v_mov_b32_e32 v1, s7
; SI-NEXT: v_mov_b32_e32 v0, s6
-; SI-NEXT: .LBB81_5: ; %end
+; SI-NEXT: .LBB81_4: ; %end
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
@@ -14755,16 +14196,14 @@ define inreg <8 x bfloat> @bitcast_v2f64_to_v8bf16_scalar(<2 x double> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB81_3
+; VI-NEXT: s_cbranch_scc0 .LBB81_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB81_4
+; VI-NEXT: s_cbranch_execnz .LBB81_3
; VI-NEXT: .LBB81_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB81_3:
-; VI-NEXT: s_branch .LBB81_2
-; VI-NEXT: .LBB81_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -14775,16 +14214,14 @@ define inreg <8 x bfloat> @bitcast_v2f64_to_v8bf16_scalar(<2 x double> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB81_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB81_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB81_4
+; GFX9-NEXT: s_cbranch_execnz .LBB81_3
; GFX9-NEXT: .LBB81_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB81_3:
-; GFX9-NEXT: s_branch .LBB81_2
-; GFX9-NEXT: .LBB81_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -14796,17 +14233,15 @@ define inreg <8 x bfloat> @bitcast_v2f64_to_v8bf16_scalar(<2 x double> inreg %a,
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB81_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB81_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB81_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB81_3
; GFX11-NEXT: .LBB81_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB81_3:
-; GFX11-NEXT: s_branch .LBB81_2
-; GFX11-NEXT: .LBB81_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -15272,7 +14707,7 @@ define inreg <2 x double> @bitcast_v8bf16_to_v2f64_scalar(<8 x bfloat> inreg %a,
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s7
; SI-NEXT: v_mul_f32_e64 v13, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB83_4
+; SI-NEXT: s_cbranch_scc0 .LBB83_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v16
; SI-NEXT: v_lshr_b64 v[0:1], v[11:12], 16
@@ -15310,17 +14745,14 @@ define inreg <2 x double> @bitcast_v8bf16_to_v2f64_scalar(<8 x bfloat> inreg %a,
; SI-NEXT: v_lshr_b64 v[3:4], v[3:4], 16
; SI-NEXT: .LBB83_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB83_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; SI-NEXT: s_branch .LBB83_2
;
; VI-LABEL: bitcast_v8bf16_to_v2f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB83_3
+; VI-NEXT: s_cbranch_scc0 .LBB83_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB83_4
+; VI-NEXT: s_cbranch_execnz .LBB83_3
; VI-NEXT: .LBB83_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s19, 16
; VI-NEXT: v_mov_b32_e32 v7, 0x40c00000
@@ -15399,8 +14831,6 @@ define inreg <2 x double> @bitcast_v8bf16_to_v2f64_scalar(<8 x bfloat> inreg %a,
; VI-NEXT: v_mov_b32_e32 v3, v4
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB83_3:
-; VI-NEXT: s_branch .LBB83_2
-; VI-NEXT: .LBB83_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -15411,9 +14841,9 @@ define inreg <2 x double> @bitcast_v8bf16_to_v2f64_scalar(<8 x bfloat> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB83_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB83_4
+; GFX9-NEXT: s_cbranch_execnz .LBB83_3
; GFX9-NEXT: .LBB83_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s19
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -15495,8 +14925,6 @@ define inreg <2 x double> @bitcast_v8bf16_to_v2f64_scalar(<8 x bfloat> inreg %a,
; GFX9-NEXT: v_lshl_or_b32 v0, v5, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB83_3:
-; GFX9-NEXT: s_branch .LBB83_2
-; GFX9-NEXT: .LBB83_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -15508,10 +14936,10 @@ define inreg <2 x double> @bitcast_v8bf16_to_v2f64_scalar(<8 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB83_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB83_3
; GFX11-TRUE16-NEXT: .LBB83_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s3
; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
@@ -15595,8 +15023,6 @@ define inreg <2 x double> @bitcast_v8bf16_to_v2f64_scalar(<8 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v5.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB83_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB83_2
-; GFX11-TRUE16-NEXT: .LBB83_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -15606,10 +15032,10 @@ define inreg <2 x double> @bitcast_v8bf16_to_v2f64_scalar(<8 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB83_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB83_3
; GFX11-FAKE16-NEXT: .LBB83_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s3, 16
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s3
@@ -15698,8 +15124,6 @@ define inreg <2 x double> @bitcast_v8bf16_to_v2f64_scalar(<8 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v7, 16, v8
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB83_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB83_2
-; GFX11-FAKE16-NEXT: .LBB83_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
@@ -16043,7 +15467,7 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB85_3
+; SI-NEXT: s_cbranch_scc0 .LBB85_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s27, s19, 24
; SI-NEXT: s_lshr_b32 s26, s19, 16
@@ -16057,7 +15481,7 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 24
; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16
; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 8
-; SI-NEXT: s_cbranch_execnz .LBB85_4
+; SI-NEXT: s_cbranch_execnz .LBB85_3
; SI-NEXT: .LBB85_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[20:21], s[16:17], 1.0
; SI-NEXT: v_add_f64 v[18:19], s[18:19], 1.0
@@ -16073,22 +15497,8 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v21
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v21
; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v21
-; SI-NEXT: s_branch .LBB85_5
+; SI-NEXT: s_branch .LBB85_4
; SI-NEXT: .LBB85_3:
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr22
-; SI-NEXT: ; implicit-def: $sgpr23
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: s_branch .LBB85_2
-; SI-NEXT: .LBB85_4:
; SI-NEXT: v_mov_b32_e32 v19, s19
; SI-NEXT: v_mov_b32_e32 v21, s17
; SI-NEXT: v_mov_b32_e32 v20, s16
@@ -16105,7 +15515,7 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v9, s8
; SI-NEXT: v_mov_b32_e32 v16, s6
; SI-NEXT: v_mov_b32_e32 v11, s4
-; SI-NEXT: .LBB85_5: ; %end
+; SI-NEXT: .LBB85_4: ; %end
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: v_mov_b32_e32 v0, v20
; SI-NEXT: v_mov_b32_e32 v4, v21
@@ -16118,7 +15528,7 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB85_3
+; VI-NEXT: s_cbranch_scc0 .LBB85_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s15, s19, 24
; VI-NEXT: s_lshr_b32 s14, s19, 16
@@ -16132,7 +15542,7 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; VI-NEXT: s_lshr_b32 s22, s16, 8
; VI-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB85_4
+; VI-NEXT: s_cbranch_execnz .LBB85_3
; VI-NEXT: .LBB85_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[16:17], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[18:19], s[16:17], 1.0
@@ -16148,22 +15558,8 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; VI-NEXT: v_lshrrev_b32_e32 v5, 8, v19
; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v18
; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v18
-; VI-NEXT: s_branch .LBB85_5
+; VI-NEXT: s_branch .LBB85_4
; VI-NEXT: .LBB85_3:
-; VI-NEXT: ; implicit-def: $sgpr22
-; VI-NEXT: ; implicit-def: $sgpr23
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr20
-; VI-NEXT: ; implicit-def: $sgpr21
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: s_branch .LBB85_2
-; VI-NEXT: .LBB85_4:
; VI-NEXT: v_mov_b32_e32 v18, s16
; VI-NEXT: v_mov_b32_e32 v16, s18
; VI-NEXT: v_mov_b32_e32 v17, s19
@@ -16180,7 +15576,7 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v7, s12
; VI-NEXT: v_mov_b32_e32 v6, s11
; VI-NEXT: v_mov_b32_e32 v5, s10
-; VI-NEXT: .LBB85_5: ; %end
+; VI-NEXT: .LBB85_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, v18
; VI-NEXT: v_mov_b32_e32 v4, v19
; VI-NEXT: v_mov_b32_e32 v8, v16
@@ -16191,7 +15587,7 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB85_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB85_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s15, s19, 24
; GFX9-NEXT: s_lshr_b32 s14, s19, 16
@@ -16205,7 +15601,7 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; GFX9-NEXT: s_lshr_b32 s22, s16, 8
; GFX9-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB85_4
+; GFX9-NEXT: s_cbranch_execnz .LBB85_3
; GFX9-NEXT: .LBB85_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[16:17], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[18:19], s[16:17], 1.0
@@ -16221,22 +15617,8 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v19
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v18
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v18
-; GFX9-NEXT: s_branch .LBB85_5
+; GFX9-NEXT: s_branch .LBB85_4
; GFX9-NEXT: .LBB85_3:
-; GFX9-NEXT: ; implicit-def: $sgpr22
-; GFX9-NEXT: ; implicit-def: $sgpr23
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr20
-; GFX9-NEXT: ; implicit-def: $sgpr21
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: s_branch .LBB85_2
-; GFX9-NEXT: .LBB85_4:
; GFX9-NEXT: v_mov_b32_e32 v18, s16
; GFX9-NEXT: v_mov_b32_e32 v16, s18
; GFX9-NEXT: v_mov_b32_e32 v17, s19
@@ -16253,7 +15635,7 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v7, s12
; GFX9-NEXT: v_mov_b32_e32 v6, s11
; GFX9-NEXT: v_mov_b32_e32 v5, s10
-; GFX9-NEXT: .LBB85_5: ; %end
+; GFX9-NEXT: .LBB85_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, v18
; GFX9-NEXT: v_mov_b32_e32 v4, v19
; GFX9-NEXT: v_mov_b32_e32 v8, v16
@@ -16265,7 +15647,7 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB85_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB85_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s14, s3, 24
; GFX11-NEXT: s_lshr_b32 s13, s3, 16
@@ -16280,7 +15662,7 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB85_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB85_3
; GFX11-NEXT: .LBB85_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[16:17], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[18:19], s[0:1], 1.0
@@ -16297,22 +15679,8 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v19
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v18
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v18
-; GFX11-NEXT: s_branch .LBB85_5
+; GFX11-NEXT: s_branch .LBB85_4
; GFX11-NEXT: .LBB85_3:
-; GFX11-NEXT: ; implicit-def: $sgpr17
-; GFX11-NEXT: ; implicit-def: $sgpr18
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr16
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: s_branch .LBB85_2
-; GFX11-NEXT: .LBB85_4:
; GFX11-NEXT: v_dual_mov_b32 v18, s0 :: v_dual_mov_b32 v17, s3
; GFX11-NEXT: v_dual_mov_b32 v16, s2 :: v_dual_mov_b32 v19, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s18 :: v_dual_mov_b32 v1, s17
@@ -16323,7 +15691,7 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; GFX11-NEXT: v_mov_b32_e32 v13, s12
; GFX11-NEXT: v_mov_b32_e32 v7, s11
; GFX11-NEXT: v_mov_b32_e32 v5, s9
-; GFX11-NEXT: .LBB85_5: ; %end
+; GFX11-NEXT: .LBB85_4: ; %end
; GFX11-NEXT: v_mov_b32_e32 v0, v18
; GFX11-NEXT: v_mov_b32_e32 v4, v19
; GFX11-NEXT: v_mov_b32_e32 v8, v16
@@ -16879,7 +16247,7 @@ define inreg <2 x double> @bitcast_v16i8_to_v2f64_scalar(<16 x i8> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s10, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s11, v0
-; SI-NEXT: s_cbranch_scc0 .LBB87_4
+; SI-NEXT: s_cbranch_scc0 .LBB87_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -16977,9 +16345,6 @@ define inreg <2 x double> @bitcast_v16i8_to_v2f64_scalar(<16 x i8> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB87_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; SI-NEXT: s_branch .LBB87_2
;
; VI-LABEL: bitcast_v16i8_to_v2f64_scalar:
; VI: ; %bb.0:
@@ -16988,7 +16353,7 @@ define inreg <2 x double> @bitcast_v16i8_to_v2f64_scalar(<16 x i8> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB87_4
+; VI-NEXT: s_cbranch_scc0 .LBB87_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v3, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -17060,9 +16425,6 @@ define inreg <2 x double> @bitcast_v16i8_to_v2f64_scalar(<16 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v3
; VI-NEXT: .LBB87_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB87_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; VI-NEXT: s_branch .LBB87_2
;
; GFX9-LABEL: bitcast_v16i8_to_v2f64_scalar:
; GFX9: ; %bb.0:
@@ -17071,7 +16433,7 @@ define inreg <2 x double> @bitcast_v16i8_to_v2f64_scalar(<16 x i8> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB87_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB87_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v3, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -17140,16 +16502,13 @@ define inreg <2 x double> @bitcast_v16i8_to_v2f64_scalar(<16 x i8> inreg %a, i32
; GFX9-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB87_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB87_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX9-NEXT: s_branch .LBB87_2
;
; GFX11-LABEL: bitcast_v16i8_to_v2f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB87_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB87_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -17214,9 +16573,6 @@ define inreg <2 x double> @bitcast_v16i8_to_v2f64_scalar(<16 x i8> inreg %a, i32
; GFX11-NEXT: v_or_b32_e32 v3, v7, v8
; GFX11-NEXT: .LBB87_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB87_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX11-NEXT: s_branch .LBB87_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17400,7 +16756,7 @@ define inreg <8 x half> @bitcast_v8i16_to_v8f16_scalar(<8 x i16> inreg %a, i32 i
; SI-NEXT: s_lshr_b32 s14, s17, 16
; SI-NEXT: s_lshr_b32 s21, s16, 16
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB89_4
+; SI-NEXT: s_cbranch_scc0 .LBB89_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s5, s17, 0xffff
; SI-NEXT: s_lshl_b32 s7, s14, 16
@@ -17462,18 +16818,12 @@ define inreg <8 x half> @bitcast_v8i16_to_v8f16_scalar(<8 x i16> inreg %a, i32 i
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: v_mov_b32_e32 v3, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB89_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: s_branch .LBB89_2
;
; VI-LABEL: bitcast_v8i16_to_v8f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB89_4
+; VI-NEXT: s_cbranch_scc0 .LBB89_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB89_3
; VI-NEXT: .LBB89_2: ; %cmp.true
@@ -17503,16 +16853,14 @@ define inreg <8 x half> @bitcast_v8i16_to_v8f16_scalar(<8 x i16> inreg %a, i32 i
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB89_4:
-; VI-NEXT: s_branch .LBB89_2
;
; GFX9-LABEL: bitcast_v8i16_to_v8f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB89_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB89_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB89_4
+; GFX9-NEXT: s_cbranch_execnz .LBB89_3
; GFX9-NEXT: .LBB89_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
@@ -17520,8 +16868,6 @@ define inreg <8 x half> @bitcast_v8i16_to_v8f16_scalar(<8 x i16> inreg %a, i32 i
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB89_3:
-; GFX9-NEXT: s_branch .LBB89_2
-; GFX9-NEXT: .LBB89_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -17533,10 +16879,10 @@ define inreg <8 x half> @bitcast_v8i16_to_v8f16_scalar(<8 x i16> inreg %a, i32 i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB89_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB89_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB89_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB89_3
; GFX11-NEXT: .LBB89_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
@@ -17544,8 +16890,6 @@ define inreg <8 x half> @bitcast_v8i16_to_v8f16_scalar(<8 x i16> inreg %a, i32 i
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB89_3:
-; GFX11-NEXT: s_branch .LBB89_2
-; GFX11-NEXT: .LBB89_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -17715,9 +17059,9 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: s_lshr_b32 s6, s17, 16
; SI-NEXT: s_lshr_b32 s9, s16, 16
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB91_3
+; SI-NEXT: s_cbranch_scc0 .LBB91_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB91_4
+; SI-NEXT: s_cbranch_execnz .LBB91_3
; SI-NEXT: .LBB91_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s9
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -17753,10 +17097,8 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: v_lshr_b64 v[6:7], v[0:1], 16
; SI-NEXT: v_lshr_b64 v[4:5], v[2:3], 16
; SI-NEXT: v_or_b32_e32 v2, v11, v2
-; SI-NEXT: s_branch .LBB91_5
+; SI-NEXT: s_branch .LBB91_4
; SI-NEXT: .LBB91_3:
-; SI-NEXT: s_branch .LBB91_2
-; SI-NEXT: .LBB91_4:
; SI-NEXT: v_mov_b32_e32 v8, s7
; SI-NEXT: v_mov_b32_e32 v10, s6
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -17765,7 +17107,7 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: v_mov_b32_e32 v9, s16
; SI-NEXT: v_mov_b32_e32 v6, s9
; SI-NEXT: v_mov_b32_e32 v4, s8
-; SI-NEXT: .LBB91_5: ; %end
+; SI-NEXT: .LBB91_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v9
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v6
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -17784,9 +17126,9 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB91_3
+; VI-NEXT: s_cbranch_scc0 .LBB91_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB91_4
+; VI-NEXT: s_cbranch_execnz .LBB91_3
; VI-NEXT: .LBB91_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -17811,8 +17153,6 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i
; VI-NEXT: v_or_b32_e32 v0, v4, v5
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB91_3:
-; VI-NEXT: s_branch .LBB91_2
-; VI-NEXT: .LBB91_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -17823,9 +17163,9 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB91_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB91_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB91_4
+; GFX9-NEXT: s_cbranch_execnz .LBB91_3
; GFX9-NEXT: .LBB91_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v3, s19, v0 op_sel_hi:[1,0]
@@ -17834,8 +17174,6 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB91_3:
-; GFX9-NEXT: s_branch .LBB91_2
-; GFX9-NEXT: .LBB91_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -17847,10 +17185,10 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB91_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB91_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB91_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB91_3
; GFX11-NEXT: .LBB91_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
@@ -17858,8 +17196,6 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB91_3:
-; GFX11-NEXT: s_branch .LBB91_2
-; GFX11-NEXT: .LBB91_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -18041,7 +17377,7 @@ define inreg <8 x bfloat> @bitcast_v8i16_to_v8bf16_scalar(<8 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s15, s17, 16
; SI-NEXT: s_lshr_b32 s14, s16, 16
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB93_4
+; SI-NEXT: s_cbranch_scc0 .LBB93_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s10, s16, 16
; SI-NEXT: s_lshl_b32 s13, s14, 16
@@ -18099,22 +17435,12 @@ define inreg <8 x bfloat> @bitcast_v8i16_to_v8bf16_scalar(<8 x i16> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s6
; SI-NEXT: v_lshr_b64 v[3:4], v[3:4], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB93_4:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB93_2
;
; VI-LABEL: bitcast_v8i16_to_v8bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB93_4
+; VI-NEXT: s_cbranch_scc0 .LBB93_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB93_3
; VI-NEXT: .LBB93_2: ; %cmp.true
@@ -18144,16 +17470,14 @@ define inreg <8 x bfloat> @bitcast_v8i16_to_v8bf16_scalar(<8 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB93_4:
-; VI-NEXT: s_branch .LBB93_2
;
; GFX9-LABEL: bitcast_v8i16_to_v8bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB93_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB93_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB93_4
+; GFX9-NEXT: s_cbranch_execnz .LBB93_3
; GFX9-NEXT: .LBB93_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
@@ -18161,8 +17485,6 @@ define inreg <8 x bfloat> @bitcast_v8i16_to_v8bf16_scalar(<8 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB93_3:
-; GFX9-NEXT: s_branch .LBB93_2
-; GFX9-NEXT: .LBB93_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -18174,10 +17496,10 @@ define inreg <8 x bfloat> @bitcast_v8i16_to_v8bf16_scalar(<8 x i16> inreg %a, i3
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB93_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB93_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB93_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB93_3
; GFX11-NEXT: .LBB93_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
@@ -18185,8 +17507,6 @@ define inreg <8 x bfloat> @bitcast_v8i16_to_v8bf16_scalar(<8 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB93_3:
-; GFX11-NEXT: s_branch .LBB93_2
-; GFX11-NEXT: .LBB93_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -18662,7 +17982,7 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v8, 1.0, s6
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s5
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s4
-; SI-NEXT: s_cbranch_scc0 .LBB95_4
+; SI-NEXT: s_cbranch_scc0 .LBB95_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v15
; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v1
@@ -18716,24 +18036,14 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v9
; SI-NEXT: v_or_b32_e32 v3, v3, v4
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB95_4:
-; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: ; implicit-def: $vgpr12
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: ; implicit-def: $vgpr13
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: s_branch .LBB95_2
;
; VI-LABEL: bitcast_v8bf16_to_v8i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB95_3
+; VI-NEXT: s_cbranch_scc0 .LBB95_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB95_4
+; VI-NEXT: s_cbranch_execnz .LBB95_3
; VI-NEXT: .LBB95_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v7, 0x40c00000
@@ -18812,8 +18122,6 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v3, v6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB95_3:
-; VI-NEXT: s_branch .LBB95_2
-; VI-NEXT: .LBB95_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -18824,9 +18132,9 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB95_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB95_4
+; GFX9-NEXT: s_cbranch_execnz .LBB95_3
; GFX9-NEXT: .LBB95_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s16
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -18904,8 +18212,6 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3
; GFX9-NEXT: v_and_or_b32 v0, v4, v8, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB95_3:
-; GFX9-NEXT: s_branch .LBB95_2
-; GFX9-NEXT: .LBB95_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -18917,10 +18223,10 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB95_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB95_3
; GFX11-TRUE16-NEXT: .LBB95_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s0
; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -18993,8 +18299,6 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v9.h
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB95_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB95_2
-; GFX11-TRUE16-NEXT: .LBB95_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -19004,10 +18308,10 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB95_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB95_3
; GFX11-FAKE16-NEXT: .LBB95_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s0
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -19092,8 +18396,6 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v9
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB95_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB95_2
-; GFX11-FAKE16-NEXT: .LBB95_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
@@ -19503,7 +18805,7 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in
; SI-NEXT: s_lshr_b32 s26, s17, 16
; SI-NEXT: s_lshr_b32 s28, s16, 16
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB97_4
+; SI-NEXT: s_cbranch_scc0 .LBB97_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s28, 16
@@ -19579,26 +18881,12 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v14, s27
; SI-NEXT: v_mov_b32_e32 v15, s13
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB97_4:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr22
-; SI-NEXT: ; implicit-def: $sgpr20
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: s_branch .LBB97_2
;
; VI-LABEL: bitcast_v8i16_to_v16i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB97_4
+; VI-NEXT: s_cbranch_scc0 .LBB97_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s10, s19, 24
; VI-NEXT: s_lshr_b32 s11, s19, 16
@@ -19664,26 +18952,12 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v14, s11
; VI-NEXT: v_mov_b32_e32 v15, s10
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB97_4:
-; VI-NEXT: ; implicit-def: $sgpr23
-; VI-NEXT: ; implicit-def: $sgpr22
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr21
-; VI-NEXT: ; implicit-def: $sgpr20
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB97_2
;
; GFX9-LABEL: bitcast_v8i16_to_v16i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB97_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB97_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s10, s19, 24
; GFX9-NEXT: s_lshr_b32 s11, s19, 16
@@ -19697,7 +18971,7 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in
; GFX9-NEXT: s_lshr_b32 s23, s16, 8
; GFX9-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB97_4
+; GFX9-NEXT: s_cbranch_execnz .LBB97_3
; GFX9-NEXT: .LBB97_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v19, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v18, s16, 3 op_sel_hi:[1,0]
@@ -19715,22 +18989,8 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v19
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v18
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v18
-; GFX9-NEXT: s_branch .LBB97_5
+; GFX9-NEXT: s_branch .LBB97_4
; GFX9-NEXT: .LBB97_3:
-; GFX9-NEXT: ; implicit-def: $sgpr23
-; GFX9-NEXT: ; implicit-def: $sgpr21
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr22
-; GFX9-NEXT: ; implicit-def: $sgpr20
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB97_2
-; GFX9-NEXT: .LBB97_4:
; GFX9-NEXT: v_mov_b32_e32 v18, s16
; GFX9-NEXT: v_mov_b32_e32 v19, s17
; GFX9-NEXT: v_mov_b32_e32 v16, s18
@@ -19747,7 +19007,7 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v15, s10
; GFX9-NEXT: v_mov_b32_e32 v11, s6
; GFX9-NEXT: v_mov_b32_e32 v3, s4
-; GFX9-NEXT: .LBB97_5: ; %end
+; GFX9-NEXT: .LBB97_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, v18
; GFX9-NEXT: v_mov_b32_e32 v4, v19
; GFX9-NEXT: v_mov_b32_e32 v8, v16
@@ -19759,7 +19019,7 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB97_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB97_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s9, s3, 24
; GFX11-NEXT: s_lshr_b32 s10, s3, 16
@@ -19774,7 +19034,7 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB97_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB97_3
; GFX11-NEXT: .LBB97_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v19, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v17, s3, 3 op_sel_hi:[1,0]
@@ -19794,22 +19054,8 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v19
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v18
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v18
-; GFX11-NEXT: s_branch .LBB97_5
+; GFX11-NEXT: s_branch .LBB97_4
; GFX11-NEXT: .LBB97_3:
-; GFX11-NEXT: ; implicit-def: $sgpr18
-; GFX11-NEXT: ; implicit-def: $sgpr16
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr17
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: s_branch .LBB97_2
-; GFX11-NEXT: .LBB97_4:
; GFX11-NEXT: v_dual_mov_b32 v18, s0 :: v_dual_mov_b32 v19, s1
; GFX11-NEXT: v_dual_mov_b32 v16, s2 :: v_dual_mov_b32 v17, s3
; GFX11-NEXT: v_dual_mov_b32 v1, s18 :: v_dual_mov_b32 v2, s16
@@ -19820,7 +19066,7 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in
; GFX11-NEXT: v_mov_b32_e32 v15, s9
; GFX11-NEXT: v_mov_b32_e32 v11, s6
; GFX11-NEXT: v_mov_b32_e32 v3, s4
-; GFX11-NEXT: .LBB97_5: ; %end
+; GFX11-NEXT: .LBB97_4: ; %end
; GFX11-NEXT: v_mov_b32_e32 v0, v18
; GFX11-NEXT: v_mov_b32_e32 v4, v19
; GFX11-NEXT: v_mov_b32_e32 v8, v16
@@ -20397,7 +19643,7 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in
; SI-NEXT: v_readfirstlane_b32 s14, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s15, v0
-; SI-NEXT: s_cbranch_scc0 .LBB99_4
+; SI-NEXT: s_cbranch_scc0 .LBB99_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -20517,14 +19763,6 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: v_mov_b32_e32 v3, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB99_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB99_2
;
; VI-LABEL: bitcast_v16i8_to_v8i16_scalar:
; VI: ; %bb.0:
@@ -20533,7 +19771,7 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB99_4
+; VI-NEXT: s_cbranch_scc0 .LBB99_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v3, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -20605,9 +19843,6 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v3
; VI-NEXT: .LBB99_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB99_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; VI-NEXT: s_branch .LBB99_2
;
; GFX9-LABEL: bitcast_v16i8_to_v8i16_scalar:
; GFX9: ; %bb.0:
@@ -20616,7 +19851,7 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB99_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB99_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v3, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -20685,16 +19920,13 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in
; GFX9-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB99_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB99_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX9-NEXT: s_branch .LBB99_2
;
; GFX11-LABEL: bitcast_v16i8_to_v8i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB99_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB99_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -20759,9 +19991,6 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in
; GFX11-NEXT: v_or_b32_e32 v3, v7, v8
; GFX11-NEXT: .LBB99_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB99_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX11-NEXT: s_branch .LBB99_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -20956,7 +20185,7 @@ define inreg <8 x bfloat> @bitcast_v8f16_to_v8bf16_scalar(<8 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s7, s17, 16
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB101_3
+; SI-NEXT: s_cbranch_scc0 .LBB101_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s10, s16, 16
; SI-NEXT: s_lshl_b32 s11, s6, 16
@@ -20966,7 +20195,7 @@ define inreg <8 x bfloat> @bitcast_v8f16_to_v8bf16_scalar(<8 x half> inreg %a, i
; SI-NEXT: s_lshl_b32 s15, s8, 16
; SI-NEXT: s_lshl_b32 s20, s19, 16
; SI-NEXT: s_lshl_b32 s21, s9, 16
-; SI-NEXT: s_cbranch_execnz .LBB101_4
+; SI-NEXT: s_cbranch_execnz .LBB101_3
; SI-NEXT: .LBB101_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s9
; SI-NEXT: v_cvt_f32_f16_e32 v1, s19
@@ -21000,18 +20229,8 @@ define inreg <8 x bfloat> @bitcast_v8f16_to_v8bf16_scalar(<8 x half> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; SI-NEXT: s_branch .LBB101_5
+; SI-NEXT: s_branch .LBB101_4
; SI-NEXT: .LBB101_3:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr20
-; SI-NEXT: ; implicit-def: $sgpr21
-; SI-NEXT: s_branch .LBB101_2
-; SI-NEXT: .LBB101_4:
; SI-NEXT: v_mov_b32_e32 v5, s21
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v6, s15
@@ -21020,7 +20239,7 @@ define inreg <8 x bfloat> @bitcast_v8f16_to_v8bf16_scalar(<8 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v2, s12
; SI-NEXT: v_mov_b32_e32 v1, s11
; SI-NEXT: v_mov_b32_e32 v0, s10
-; SI-NEXT: .LBB101_5: ; %end
+; SI-NEXT: .LBB101_4: ; %end
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
@@ -21043,9 +20262,9 @@ define inreg <8 x bfloat> @bitcast_v8f16_to_v8bf16_scalar(<8 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB101_3
+; VI-NEXT: s_cbranch_scc0 .LBB101_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB101_4
+; VI-NEXT: s_cbranch_execnz .LBB101_3
; VI-NEXT: .LBB101_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -21070,8 +20289,6 @@ define inreg <8 x bfloat> @bitcast_v8f16_to_v8bf16_scalar(<8 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v4, v5
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB101_3:
-; VI-NEXT: s_branch .LBB101_2
-; VI-NEXT: .LBB101_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -21082,9 +20299,9 @@ define inreg <8 x bfloat> @bitcast_v8f16_to_v8bf16_scalar(<8 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB101_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB101_4
+; GFX9-NEXT: s_cbranch_execnz .LBB101_3
; GFX9-NEXT: .LBB101_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v3, s19, v0 op_sel_hi:[1,0]
@@ -21093,8 +20310,6 @@ define inreg <8 x bfloat> @bitcast_v8f16_to_v8bf16_scalar(<8 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB101_3:
-; GFX9-NEXT: s_branch .LBB101_2
-; GFX9-NEXT: .LBB101_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -21106,10 +20321,10 @@ define inreg <8 x bfloat> @bitcast_v8f16_to_v8bf16_scalar(<8 x half> inreg %a, i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB101_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB101_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB101_3
; GFX11-NEXT: .LBB101_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
@@ -21117,8 +20332,6 @@ define inreg <8 x bfloat> @bitcast_v8f16_to_v8bf16_scalar(<8 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB101_3:
-; GFX11-NEXT: s_branch .LBB101_2
-; GFX11-NEXT: .LBB101_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -21607,7 +20820,7 @@ define inreg <8 x half> @bitcast_v8bf16_to_v8f16_scalar(<8 x bfloat> inreg %a, i
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s5
; SI-NEXT: v_mul_f32_e64 v16, 1.0, s11
; SI-NEXT: v_mul_f32_e64 v11, 1.0, s7
-; SI-NEXT: s_cbranch_scc0 .LBB103_4
+; SI-NEXT: s_cbranch_scc0 .LBB103_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v20
; SI-NEXT: v_lshr_b64 v[5:6], v[0:1], 16
@@ -21665,22 +20878,14 @@ define inreg <8 x half> @bitcast_v8bf16_to_v8f16_scalar(<8 x bfloat> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v8
; SI-NEXT: v_or_b32_e32 v3, v3, v4
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB103_4:
-; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: ; implicit-def: $vgpr14
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr12
-; SI-NEXT: ; implicit-def: $vgpr18
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: s_branch .LBB103_2
;
; VI-LABEL: bitcast_v8bf16_to_v8f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB103_3
+; VI-NEXT: s_cbranch_scc0 .LBB103_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB103_4
+; VI-NEXT: s_cbranch_execnz .LBB103_3
; VI-NEXT: .LBB103_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v7, 0x40c00000
@@ -21759,8 +20964,6 @@ define inreg <8 x half> @bitcast_v8bf16_to_v8f16_scalar(<8 x bfloat> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v3, v6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB103_3:
-; VI-NEXT: s_branch .LBB103_2
-; VI-NEXT: .LBB103_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -21771,9 +20974,9 @@ define inreg <8 x half> @bitcast_v8bf16_to_v8f16_scalar(<8 x bfloat> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB103_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB103_4
+; GFX9-NEXT: s_cbranch_execnz .LBB103_3
; GFX9-NEXT: .LBB103_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s16
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -21855,8 +21058,6 @@ define inreg <8 x half> @bitcast_v8bf16_to_v8f16_scalar(<8 x bfloat> inreg %a, i
; GFX9-NEXT: v_lshl_or_b32 v0, v4, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB103_3:
-; GFX9-NEXT: s_branch .LBB103_2
-; GFX9-NEXT: .LBB103_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -21868,10 +21069,10 @@ define inreg <8 x half> @bitcast_v8bf16_to_v8f16_scalar(<8 x bfloat> inreg %a, i
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB103_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB103_3
; GFX11-TRUE16-NEXT: .LBB103_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s0
; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -21955,8 +21156,6 @@ define inreg <8 x half> @bitcast_v8bf16_to_v8f16_scalar(<8 x bfloat> inreg %a, i
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v7.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB103_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB103_2
-; GFX11-TRUE16-NEXT: .LBB103_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -21966,10 +21165,10 @@ define inreg <8 x half> @bitcast_v8bf16_to_v8f16_scalar(<8 x bfloat> inreg %a, i
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB103_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB103_3
; GFX11-FAKE16-NEXT: .LBB103_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s0
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -22062,8 +21261,6 @@ define inreg <8 x half> @bitcast_v8bf16_to_v8f16_scalar(<8 x bfloat> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v6, 16, v7
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB103_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB103_2
-; GFX11-FAKE16-NEXT: .LBB103_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
@@ -22477,7 +21674,7 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: s_lshr_b32 s26, s17, 16
; SI-NEXT: s_lshr_b32 s27, s16, 16
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB105_3
+; SI-NEXT: s_cbranch_scc0 .LBB105_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s27, 16
@@ -22501,7 +21698,7 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: s_lshr_b32 s9, s13, 8
; SI-NEXT: s_bfe_u32 s11, s26, 0x80008
; SI-NEXT: s_bfe_u32 s15, s28, 0x80008
-; SI-NEXT: s_cbranch_execnz .LBB105_4
+; SI-NEXT: s_cbranch_execnz .LBB105_3
; SI-NEXT: .LBB105_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s29
; SI-NEXT: v_cvt_f32_f16_e32 v1, s18
@@ -22545,22 +21742,8 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v18
; SI-NEXT: v_bfe_u32 v7, v6, 8, 8
; SI-NEXT: v_bfe_u32 v15, v14, 8, 8
-; SI-NEXT: s_branch .LBB105_5
+; SI-NEXT: s_branch .LBB105_4
; SI-NEXT: .LBB105_3:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr22
-; SI-NEXT: ; implicit-def: $sgpr20
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: s_branch .LBB105_2
-; SI-NEXT: .LBB105_4:
; SI-NEXT: v_mov_b32_e32 v14, s28
; SI-NEXT: v_mov_b32_e32 v6, s26
; SI-NEXT: v_mov_b32_e32 v15, s15
@@ -22577,7 +21760,7 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: v_mov_b32_e32 v11, s14
; SI-NEXT: v_mov_b32_e32 v12, s20
; SI-NEXT: v_mov_b32_e32 v9, s22
-; SI-NEXT: .LBB105_5: ; %end
+; SI-NEXT: .LBB105_4: ; %end
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: v_mov_b32_e32 v0, v19
; SI-NEXT: v_mov_b32_e32 v4, v20
@@ -22590,7 +21773,7 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB105_3
+; VI-NEXT: s_cbranch_scc0 .LBB105_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s19, 24
; VI-NEXT: s_lshr_b32 s20, s19, 16
@@ -22604,7 +21787,7 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; VI-NEXT: s_lshr_b32 s12, s16, 8
; VI-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB105_4
+; VI-NEXT: s_cbranch_execnz .LBB105_3
; VI-NEXT: .LBB105_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v1, 0x200
@@ -22635,22 +21818,8 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v18
; VI-NEXT: v_bfe_u32 v15, v14, 8, 8
; VI-NEXT: v_bfe_u32 v7, v6, 8, 8
-; VI-NEXT: s_branch .LBB105_5
+; VI-NEXT: s_branch .LBB105_4
; VI-NEXT: .LBB105_3:
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr23
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr22
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr21
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr20
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB105_2
-; VI-NEXT: .LBB105_4:
; VI-NEXT: v_mov_b32_e32 v2, s23
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v10, s21
@@ -22667,7 +21836,7 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; VI-NEXT: v_mov_b32_e32 v5, s10
; VI-NEXT: v_mov_b32_e32 v11, s6
; VI-NEXT: v_mov_b32_e32 v3, s4
-; VI-NEXT: .LBB105_5: ; %end
+; VI-NEXT: .LBB105_4: ; %end
; VI-NEXT: v_mov_b32_e32 v4, v17
; VI-NEXT: v_mov_b32_e32 v12, v16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -22676,7 +21845,7 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB105_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB105_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s10, s19, 24
; GFX9-NEXT: s_lshr_b32 s11, s19, 16
@@ -22690,7 +21859,7 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; GFX9-NEXT: s_lshr_b32 s23, s16, 8
; GFX9-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB105_4
+; GFX9-NEXT: s_cbranch_execnz .LBB105_3
; GFX9-NEXT: .LBB105_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v19, s17, v0 op_sel_hi:[1,0]
@@ -22709,22 +21878,8 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v19
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v18
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v18
-; GFX9-NEXT: s_branch .LBB105_5
+; GFX9-NEXT: s_branch .LBB105_4
; GFX9-NEXT: .LBB105_3:
-; GFX9-NEXT: ; implicit-def: $sgpr23
-; GFX9-NEXT: ; implicit-def: $sgpr21
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr22
-; GFX9-NEXT: ; implicit-def: $sgpr20
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB105_2
-; GFX9-NEXT: .LBB105_4:
; GFX9-NEXT: v_mov_b32_e32 v18, s16
; GFX9-NEXT: v_mov_b32_e32 v19, s17
; GFX9-NEXT: v_mov_b32_e32 v16, s18
@@ -22741,7 +21896,7 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; GFX9-NEXT: v_mov_b32_e32 v15, s10
; GFX9-NEXT: v_mov_b32_e32 v11, s6
; GFX9-NEXT: v_mov_b32_e32 v3, s4
-; GFX9-NEXT: .LBB105_5: ; %end
+; GFX9-NEXT: .LBB105_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, v18
; GFX9-NEXT: v_mov_b32_e32 v4, v19
; GFX9-NEXT: v_mov_b32_e32 v8, v16
@@ -22753,7 +21908,7 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB105_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB105_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s9, s3, 24
; GFX11-NEXT: s_lshr_b32 s10, s3, 16
@@ -22768,7 +21923,7 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB105_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB105_3
; GFX11-NEXT: .LBB105_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v19, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s3 op_sel_hi:[0,1]
@@ -22788,22 +21943,8 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v19
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v18
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v18
-; GFX11-NEXT: s_branch .LBB105_5
+; GFX11-NEXT: s_branch .LBB105_4
; GFX11-NEXT: .LBB105_3:
-; GFX11-NEXT: ; implicit-def: $sgpr18
-; GFX11-NEXT: ; implicit-def: $sgpr16
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr17
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: s_branch .LBB105_2
-; GFX11-NEXT: .LBB105_4:
; GFX11-NEXT: v_dual_mov_b32 v18, s0 :: v_dual_mov_b32 v19, s1
; GFX11-NEXT: v_dual_mov_b32 v16, s2 :: v_dual_mov_b32 v17, s3
; GFX11-NEXT: v_dual_mov_b32 v1, s18 :: v_dual_mov_b32 v2, s16
@@ -22814,7 +21955,7 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; GFX11-NEXT: v_mov_b32_e32 v15, s9
; GFX11-NEXT: v_mov_b32_e32 v11, s6
; GFX11-NEXT: v_mov_b32_e32 v3, s4
-; GFX11-NEXT: .LBB105_5: ; %end
+; GFX11-NEXT: .LBB105_4: ; %end
; GFX11-NEXT: v_mov_b32_e32 v0, v18
; GFX11-NEXT: v_mov_b32_e32 v4, v19
; GFX11-NEXT: v_mov_b32_e32 v8, v16
@@ -23391,7 +22532,7 @@ define inreg <8 x half> @bitcast_v16i8_to_v8f16_scalar(<16 x i8> inreg %a, i32 i
; SI-NEXT: v_readfirstlane_b32 s14, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s15, v0
-; SI-NEXT: s_cbranch_scc0 .LBB107_4
+; SI-NEXT: s_cbranch_scc0 .LBB107_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -23511,14 +22652,6 @@ define inreg <8 x half> @bitcast_v16i8_to_v8f16_scalar(<16 x i8> inreg %a, i32 i
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: v_mov_b32_e32 v3, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB107_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB107_2
;
; VI-LABEL: bitcast_v16i8_to_v8f16_scalar:
; VI: ; %bb.0:
@@ -23527,7 +22660,7 @@ define inreg <8 x half> @bitcast_v16i8_to_v8f16_scalar(<16 x i8> inreg %a, i32 i
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB107_4
+; VI-NEXT: s_cbranch_scc0 .LBB107_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v3, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -23599,9 +22732,6 @@ define inreg <8 x half> @bitcast_v16i8_to_v8f16_scalar(<16 x i8> inreg %a, i32 i
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v3
; VI-NEXT: .LBB107_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB107_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; VI-NEXT: s_branch .LBB107_2
;
; GFX9-LABEL: bitcast_v16i8_to_v8f16_scalar:
; GFX9: ; %bb.0:
@@ -23610,7 +22740,7 @@ define inreg <8 x half> @bitcast_v16i8_to_v8f16_scalar(<16 x i8> inreg %a, i32 i
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB107_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB107_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v3, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -23679,16 +22809,13 @@ define inreg <8 x half> @bitcast_v16i8_to_v8f16_scalar(<16 x i8> inreg %a, i32 i
; GFX9-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB107_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB107_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX9-NEXT: s_branch .LBB107_2
;
; GFX11-LABEL: bitcast_v16i8_to_v8f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB107_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB107_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -23753,9 +22880,6 @@ define inreg <8 x half> @bitcast_v16i8_to_v8f16_scalar(<16 x i8> inreg %a, i32 i
; GFX11-NEXT: v_or_b32_e32 v3, v7, v8
; GFX11-NEXT: .LBB107_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB107_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX11-NEXT: s_branch .LBB107_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -24453,7 +23577,7 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v25, 1.0, s7
; SI-NEXT: v_mul_f32_e64 v29, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v13, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB109_4
+; SI-NEXT: s_cbranch_scc0 .LBB109_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v28
; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v30
@@ -24520,26 +23644,12 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v12, v23
; SI-NEXT: v_mov_b32_e32 v13, v16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB109_4:
-; SI-NEXT: ; implicit-def: $vgpr19
-; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: ; implicit-def: $vgpr22
-; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: ; implicit-def: $vgpr10
-; SI-NEXT: ; implicit-def: $vgpr16
-; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: ; implicit-def: $vgpr17
-; SI-NEXT: s_branch .LBB109_2
;
; VI-LABEL: bitcast_v8bf16_to_v16i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB109_3
+; VI-NEXT: s_cbranch_scc0 .LBB109_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s21, s19, 24
; VI-NEXT: s_lshr_b32 s20, s19, 16
@@ -24553,7 +23663,7 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; VI-NEXT: s_lshr_b32 s13, s16, 8
; VI-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB109_4
+; VI-NEXT: s_cbranch_execnz .LBB109_3
; VI-NEXT: .LBB109_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v6, 0x40c00000
@@ -24642,22 +23752,8 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; VI-NEXT: v_lshrrev_b32_e32 v5, 8, v4
; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; VI-NEXT: v_lshrrev_b32_e32 v11, 8, v0
-; VI-NEXT: s_branch .LBB109_5
+; VI-NEXT: s_branch .LBB109_4
; VI-NEXT: .LBB109_3:
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr22
-; VI-NEXT: ; implicit-def: $sgpr23
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr20
-; VI-NEXT: ; implicit-def: $sgpr21
-; VI-NEXT: s_branch .LBB109_2
-; VI-NEXT: .LBB109_4:
; VI-NEXT: v_mov_b32_e32 v8, s18
; VI-NEXT: v_mov_b32_e32 v12, s19
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -24674,7 +23770,7 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v5, s10
; VI-NEXT: v_mov_b32_e32 v16, s6
; VI-NEXT: v_mov_b32_e32 v17, s4
-; VI-NEXT: .LBB109_5: ; %end
+; VI-NEXT: .LBB109_4: ; %end
; VI-NEXT: v_mov_b32_e32 v3, v17
; VI-NEXT: v_mov_b32_e32 v1, v11
; VI-NEXT: v_mov_b32_e32 v11, v16
@@ -24684,7 +23780,7 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB109_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s12, s19, 24
; GFX9-NEXT: s_lshr_b32 s23, s19, 16
@@ -24698,7 +23794,7 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s13, s16, 8
; GFX9-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB109_4
+; GFX9-NEXT: s_cbranch_execnz .LBB109_3
; GFX9-NEXT: .LBB109_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s17
; GFX9-NEXT: v_mov_b32_e32 v3, 0x40c00000
@@ -24791,22 +23887,8 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: s_branch .LBB109_5
+; GFX9-NEXT: s_branch .LBB109_4
; GFX9-NEXT: .LBB109_3:
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr22
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr20
-; GFX9-NEXT: ; implicit-def: $sgpr21
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr23
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: s_branch .LBB109_2
-; GFX9-NEXT: .LBB109_4:
; GFX9-NEXT: v_mov_b32_e32 v8, s18
; GFX9-NEXT: v_mov_b32_e32 v16, s19
; GFX9-NEXT: v_mov_b32_e32 v14, s23
@@ -24823,7 +23905,7 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v5, s11
; GFX9-NEXT: v_mov_b32_e32 v11, s6
; GFX9-NEXT: v_mov_b32_e32 v3, s4
-; GFX9-NEXT: .LBB109_5: ; %end
+; GFX9-NEXT: .LBB109_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v4, v17
; GFX9-NEXT: v_mov_b32_e32 v12, v16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -24833,7 +23915,7 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB109_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s3, 24
; GFX11-TRUE16-NEXT: s_lshr_b32 s18, s3, 16
@@ -24848,7 +23930,7 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-TRUE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB109_3
; GFX11-TRUE16-NEXT: .LBB109_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s1, 16
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s1, 0, s1
@@ -24945,22 +24027,8 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[11:12], 24, v[9:10]
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v9
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 8, v9
-; GFX11-TRUE16-NEXT: s_branch .LBB109_5
+; GFX11-TRUE16-NEXT: s_branch .LBB109_4
; GFX11-TRUE16-NEXT: .LBB109_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr17
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr16
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr18
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-TRUE16-NEXT: s_branch .LBB109_2
-; GFX11-TRUE16-NEXT: .LBB109_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v17, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s3 :: v_dual_mov_b32 v9, s15
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s18 :: v_dual_mov_b32 v15, s11
@@ -24970,7 +24038,7 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s13 :: v_dual_mov_b32 v5, s10
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v11, s6
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s4
-; GFX11-TRUE16-NEXT: .LBB109_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB109_4: ; %end
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v17
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v12, v16
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -24980,7 +24048,7 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB109_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s3, 24
; GFX11-FAKE16-NEXT: s_lshr_b32 s18, s3, 16
@@ -24995,7 +24063,7 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-FAKE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB109_3
; GFX11-FAKE16-NEXT: .LBB109_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s1, 16
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s1, 0, s1
@@ -25092,22 +24160,8 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[11:12], 24, v[9:10]
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v9
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 8, v9
-; GFX11-FAKE16-NEXT: s_branch .LBB109_5
+; GFX11-FAKE16-NEXT: s_branch .LBB109_4
; GFX11-FAKE16-NEXT: .LBB109_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr17
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr16
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr18
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-FAKE16-NEXT: s_branch .LBB109_2
-; GFX11-FAKE16-NEXT: .LBB109_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v17, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s3 :: v_dual_mov_b32 v9, s15
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s18 :: v_dual_mov_b32 v15, s11
@@ -25117,7 +24171,7 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s13 :: v_dual_mov_b32 v5, s10
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v11, s6
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, s4
-; GFX11-FAKE16-NEXT: .LBB109_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB109_4: ; %end
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v17
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v12, v16
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
@@ -25694,7 +24748,7 @@ define inreg <8 x bfloat> @bitcast_v16i8_to_v8bf16_scalar(<16 x i8> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s12, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s14, v0
-; SI-NEXT: s_cbranch_scc0 .LBB111_4
+; SI-NEXT: s_cbranch_scc0 .LBB111_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s4, s4, 16
@@ -25808,16 +24862,6 @@ define inreg <8 x bfloat> @bitcast_v16i8_to_v8bf16_scalar(<16 x i8> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s11
; SI-NEXT: v_lshr_b64 v[3:4], v[3:4], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB111_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: s_branch .LBB111_2
;
; VI-LABEL: bitcast_v16i8_to_v8bf16_scalar:
; VI: ; %bb.0:
@@ -25826,7 +24870,7 @@ define inreg <8 x bfloat> @bitcast_v16i8_to_v8bf16_scalar(<16 x i8> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB111_4
+; VI-NEXT: s_cbranch_scc0 .LBB111_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v3, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -25898,9 +24942,6 @@ define inreg <8 x bfloat> @bitcast_v16i8_to_v8bf16_scalar(<16 x i8> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v3
; VI-NEXT: .LBB111_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB111_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; VI-NEXT: s_branch .LBB111_2
;
; GFX9-LABEL: bitcast_v16i8_to_v8bf16_scalar:
; GFX9: ; %bb.0:
@@ -25909,7 +24950,7 @@ define inreg <8 x bfloat> @bitcast_v16i8_to_v8bf16_scalar(<16 x i8> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB111_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB111_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v3, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -25978,16 +25019,13 @@ define inreg <8 x bfloat> @bitcast_v16i8_to_v8bf16_scalar(<16 x i8> inreg %a, i3
; GFX9-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB111_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB111_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX9-NEXT: s_branch .LBB111_2
;
; GFX11-LABEL: bitcast_v16i8_to_v8bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB111_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB111_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -26052,9 +25090,6 @@ define inreg <8 x bfloat> @bitcast_v16i8_to_v8bf16_scalar(<16 x i8> inreg %a, i3
; GFX11-NEXT: v_or_b32_e32 v3, v7, v8
; GFX11-NEXT: .LBB111_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB111_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX11-NEXT: s_branch .LBB111_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.160bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.160bit.ll
index 430a93d9e9bf0..39755ee3707d9 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.160bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.160bit.ll
@@ -97,7 +97,7 @@ define inreg <5 x float> @bitcast_v5i32_to_v5f32_scalar(<5 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s21, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -113,14 +113,12 @@ define inreg <5 x float> @bitcast_v5i32_to_v5f32_scalar(<5 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v5i32_to_v5f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -136,14 +134,12 @@ define inreg <5 x float> @bitcast_v5i32_to_v5f32_scalar(<5 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v5i32_to_v5f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -159,15 +155,13 @@ define inreg <5 x float> @bitcast_v5i32_to_v5f32_scalar(<5 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v5i32_to_v5f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -183,8 +177,6 @@ define inreg <5 x float> @bitcast_v5i32_to_v5f32_scalar(<5 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_mov_b32_e32 v4, s16
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -291,9 +283,9 @@ define inreg <5 x i32> @bitcast_v5f32_to_v5i32_scalar(<5 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s21, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v4, s20, 1.0
; SI-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -302,8 +294,6 @@ define inreg <5 x i32> @bitcast_v5f32_to_v5i32_scalar(<5 x float> inreg %a, i32
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -315,9 +305,9 @@ define inreg <5 x i32> @bitcast_v5f32_to_v5i32_scalar(<5 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -326,8 +316,6 @@ define inreg <5 x i32> @bitcast_v5f32_to_v5i32_scalar(<5 x float> inreg %a, i32
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -339,9 +327,9 @@ define inreg <5 x i32> @bitcast_v5f32_to_v5i32_scalar(<5 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -350,8 +338,6 @@ define inreg <5 x i32> @bitcast_v5f32_to_v5i32_scalar(<5 x float> inreg %a, i32
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -365,10 +351,10 @@ define inreg <5 x i32> @bitcast_v5f32_to_v5i32_scalar(<5 x float> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
@@ -377,8 +363,6 @@ define inreg <5 x i32> @bitcast_v5f32_to_v5i32_scalar(<5 x float> inreg %a, i32
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_mov_b32_e32 v4, s4
@@ -524,7 +508,7 @@ define inreg <10 x i16> @bitcast_v5i32_to_v10i16_scalar(<5 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s21, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s12, s19, 16
; SI-NEXT: s_lshr_b32 s13, s17, 16
@@ -565,19 +549,12 @@ define inreg <10 x i16> @bitcast_v5i32_to_v10i16_scalar(<5 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v3, s8
; SI-NEXT: v_mov_b32_e32 v4, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v5i32_to_v10i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -593,14 +570,12 @@ define inreg <10 x i16> @bitcast_v5i32_to_v10i16_scalar(<5 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v5i32_to_v10i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -616,15 +591,13 @@ define inreg <10 x i16> @bitcast_v5i32_to_v10i16_scalar(<5 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v5i32_to_v10i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -640,8 +613,6 @@ define inreg <10 x i16> @bitcast_v5i32_to_v10i16_scalar(<5 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_mov_b32_e32 v4, s16
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -827,7 +798,7 @@ define inreg <5 x i32> @bitcast_v10i16_to_v5i32_scalar(<10 x i16> inreg %a, i32
; SI-NEXT: s_lshr_b32 s14, s17, 16
; SI-NEXT: s_lshr_b32 s15, s16, 16
; SI-NEXT: s_cmp_lg_u32 s21, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s15, 16
@@ -878,15 +849,12 @@ define inreg <5 x i32> @bitcast_v10i16_to_v5i32_scalar(<10 x i16> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: v_mov_b32_e32 v4, s8
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v10i16_to_v5i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -922,16 +890,14 @@ define inreg <5 x i32> @bitcast_v10i16_to_v5i32_scalar(<10 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v10i16_to_v5i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_4
+; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
@@ -940,8 +906,6 @@ define inreg <5 x i32> @bitcast_v10i16_to_v5i32_scalar(<10 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB7_3:
-; GFX9-NEXT: s_branch .LBB7_2
-; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -955,10 +919,10 @@ define inreg <5 x i32> @bitcast_v10i16_to_v5i32_scalar(<10 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
; GFX11-NEXT: .LBB7_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
@@ -967,8 +931,6 @@ define inreg <5 x i32> @bitcast_v10i16_to_v5i32_scalar(<10 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: s_branch .LBB7_2
-; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_mov_b32_e32 v4, s4
@@ -1114,7 +1076,7 @@ define inreg <10 x half> @bitcast_v5i32_to_v10f16_scalar(<5 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s21, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s12, s19, 16
; SI-NEXT: s_lshr_b32 s13, s17, 16
@@ -1155,19 +1117,12 @@ define inreg <10 x half> @bitcast_v5i32_to_v10f16_scalar(<5 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v3, s8
; SI-NEXT: v_mov_b32_e32 v4, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v5i32_to_v10f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -1183,14 +1138,12 @@ define inreg <10 x half> @bitcast_v5i32_to_v10f16_scalar(<5 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v5i32_to_v10f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -1206,15 +1159,13 @@ define inreg <10 x half> @bitcast_v5i32_to_v10f16_scalar(<5 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v5i32_to_v10f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -1230,8 +1181,6 @@ define inreg <10 x half> @bitcast_v5i32_to_v10f16_scalar(<5 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_mov_b32_e32 v4, s16
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1437,7 +1386,7 @@ define inreg <5 x i32> @bitcast_v10f16_to_v5i32_scalar(<10 x half> inreg %a, i32
; SI-NEXT: s_lshr_b32 s14, s17, 16
; SI-NEXT: s_lshr_b32 s15, s16, 16
; SI-NEXT: s_cmp_lg_u32 s21, 0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s15, 16
@@ -1454,7 +1403,7 @@ define inreg <5 x i32> @bitcast_v10f16_to_v5i32_scalar(<10 x half> inreg %a, i32
; SI-NEXT: s_and_b32 s8, s20, 0xffff
; SI-NEXT: s_lshl_b32 s10, s9, 16
; SI-NEXT: s_or_b32 s8, s8, s10
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s15
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -1498,9 +1447,6 @@ define inreg <5 x i32> @bitcast_v10f16_to_v5i32_scalar(<10 x half> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v4, v6, v4
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -1512,9 +1458,9 @@ define inreg <5 x i32> @bitcast_v10f16_to_v5i32_scalar(<10 x half> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s20, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -1544,8 +1490,6 @@ define inreg <5 x i32> @bitcast_v10f16_to_v5i32_scalar(<10 x half> inreg %a, i32
; VI-NEXT: v_or_b32_e32 v0, v0, v5
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1557,9 +1501,9 @@ define inreg <5 x i32> @bitcast_v10f16_to_v5i32_scalar(<10 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v4, s20, v0 op_sel_hi:[1,0]
@@ -1569,8 +1513,6 @@ define inreg <5 x i32> @bitcast_v10f16_to_v5i32_scalar(<10 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1584,10 +1526,10 @@ define inreg <5 x i32> @bitcast_v10f16_to_v5i32_scalar(<10 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
@@ -1596,8 +1538,6 @@ define inreg <5 x i32> @bitcast_v10f16_to_v5i32_scalar(<10 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_mov_b32_e32 v4, s4
@@ -1741,14 +1681,14 @@ define inreg <10 x i16> @bitcast_v5f32_to_v10i16_scalar(<5 x float> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s21, 0
-; SI-NEXT: s_cbranch_scc0 .LBB13_3
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s13, s19, 16
; SI-NEXT: s_lshr_b32 s12, s17, 16
; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB13_4
+; SI-NEXT: s_cbranch_execnz .LBB13_3
; SI-NEXT: .LBB13_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v3, s19, 1.0
; SI-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -1760,15 +1700,8 @@ define inreg <10 x i16> @bitcast_v5f32_to_v10i16_scalar(<5 x float> inreg %a, i3
; SI-NEXT: v_lshr_b64 v[5:6], v[4:5], 16
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v1
-; SI-NEXT: s_branch .LBB13_5
+; SI-NEXT: s_branch .LBB13_4
; SI-NEXT: .LBB13_3:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB13_2
-; SI-NEXT: .LBB13_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -1779,7 +1712,7 @@ define inreg <10 x i16> @bitcast_v5f32_to_v10i16_scalar(<5 x float> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v5, s6
; SI-NEXT: v_mov_b32_e32 v8, s8
; SI-NEXT: v_mov_b32_e32 v7, s4
-; SI-NEXT: .LBB13_5: ; %end
+; SI-NEXT: .LBB13_4: ; %end
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_or_b32_e32 v1, v1, v6
@@ -1801,9 +1734,9 @@ define inreg <10 x i16> @bitcast_v5f32_to_v10i16_scalar(<5 x float> inreg %a, i3
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_3
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB13_4
+; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -1812,8 +1745,6 @@ define inreg <10 x i16> @bitcast_v5f32_to_v10i16_scalar(<5 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB13_3:
-; VI-NEXT: s_branch .LBB13_2
-; VI-NEXT: .LBB13_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1828,9 +1759,9 @@ define inreg <10 x i16> @bitcast_v5f32_to_v10i16_scalar(<5 x float> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB13_4
+; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -1839,8 +1770,6 @@ define inreg <10 x i16> @bitcast_v5f32_to_v10i16_scalar(<5 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB13_3:
-; GFX9-NEXT: s_branch .LBB13_2
-; GFX9-NEXT: .LBB13_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1857,10 +1786,10 @@ define inreg <10 x i16> @bitcast_v5f32_to_v10i16_scalar(<5 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
; GFX11-NEXT: .LBB13_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
@@ -1869,8 +1798,6 @@ define inreg <10 x i16> @bitcast_v5f32_to_v10i16_scalar(<5 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB13_3:
-; GFX11-NEXT: s_branch .LBB13_2
-; GFX11-NEXT: .LBB13_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -2061,7 +1988,7 @@ define inreg <5 x float> @bitcast_v10i16_to_v5f32_scalar(<10 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s14, s17, 16
; SI-NEXT: s_lshr_b32 s15, s16, 16
; SI-NEXT: s_cmp_lg_u32 s21, 0
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s15, 16
@@ -2112,15 +2039,12 @@ define inreg <5 x float> @bitcast_v10i16_to_v5f32_scalar(<10 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: v_mov_b32_e32 v4, s8
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v10i16_to_v5f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB15_3
; VI-NEXT: .LBB15_2: ; %cmp.true
@@ -2156,16 +2080,14 @@ define inreg <5 x float> @bitcast_v10i16_to_v5f32_scalar(<10 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v10i16_to_v5f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
@@ -2174,8 +2096,6 @@ define inreg <5 x float> @bitcast_v10i16_to_v5f32_scalar(<10 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2189,10 +2109,10 @@ define inreg <5 x float> @bitcast_v10i16_to_v5f32_scalar(<10 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
@@ -2201,8 +2121,6 @@ define inreg <5 x float> @bitcast_v10i16_to_v5f32_scalar(<10 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_mov_b32_e32 v4, s4
@@ -2346,14 +2264,14 @@ define inreg <10 x half> @bitcast_v5f32_to_v10f16_scalar(<5 x float> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s21, 0
-; SI-NEXT: s_cbranch_scc0 .LBB17_3
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s13, s19, 16
; SI-NEXT: s_lshr_b32 s12, s17, 16
; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB17_4
+; SI-NEXT: s_cbranch_execnz .LBB17_3
; SI-NEXT: .LBB17_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v3, s19, 1.0
; SI-NEXT: v_add_f32_e64 v2, s18, 1.0
@@ -2365,15 +2283,8 @@ define inreg <10 x half> @bitcast_v5f32_to_v10f16_scalar(<5 x float> inreg %a, i
; SI-NEXT: v_lshr_b64 v[5:6], v[4:5], 16
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v1
-; SI-NEXT: s_branch .LBB17_5
+; SI-NEXT: s_branch .LBB17_4
; SI-NEXT: .LBB17_3:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB17_2
-; SI-NEXT: .LBB17_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -2384,7 +2295,7 @@ define inreg <10 x half> @bitcast_v5f32_to_v10f16_scalar(<5 x float> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v5, s6
; SI-NEXT: v_mov_b32_e32 v8, s8
; SI-NEXT: v_mov_b32_e32 v7, s4
-; SI-NEXT: .LBB17_5: ; %end
+; SI-NEXT: .LBB17_4: ; %end
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_or_b32_e32 v1, v1, v6
@@ -2406,9 +2317,9 @@ define inreg <10 x half> @bitcast_v5f32_to_v10f16_scalar(<5 x float> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_3
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_4
+; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -2417,8 +2328,6 @@ define inreg <10 x half> @bitcast_v5f32_to_v10f16_scalar(<5 x float> inreg %a, i
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB17_3:
-; VI-NEXT: s_branch .LBB17_2
-; VI-NEXT: .LBB17_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2433,9 +2342,9 @@ define inreg <10 x half> @bitcast_v5f32_to_v10f16_scalar(<5 x float> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_4
+; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -2444,8 +2353,6 @@ define inreg <10 x half> @bitcast_v5f32_to_v10f16_scalar(<5 x float> inreg %a, i
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB17_3:
-; GFX9-NEXT: s_branch .LBB17_2
-; GFX9-NEXT: .LBB17_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2462,10 +2369,10 @@ define inreg <10 x half> @bitcast_v5f32_to_v10f16_scalar(<5 x float> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
; GFX11-NEXT: .LBB17_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
@@ -2474,8 +2381,6 @@ define inreg <10 x half> @bitcast_v5f32_to_v10f16_scalar(<5 x float> inreg %a, i
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB17_3:
-; GFX11-NEXT: s_branch .LBB17_2
-; GFX11-NEXT: .LBB17_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -2686,7 +2591,7 @@ define inreg <5 x float> @bitcast_v10f16_to_v5f32_scalar(<10 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s14, s17, 16
; SI-NEXT: s_lshr_b32 s15, s16, 16
; SI-NEXT: s_cmp_lg_u32 s21, 0
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s15, 16
@@ -2703,7 +2608,7 @@ define inreg <5 x float> @bitcast_v10f16_to_v5f32_scalar(<10 x half> inreg %a, i
; SI-NEXT: s_and_b32 s8, s20, 0xffff
; SI-NEXT: s_lshl_b32 s10, s9, 16
; SI-NEXT: s_or_b32 s8, s8, s10
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s15
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -2747,9 +2652,6 @@ define inreg <5 x float> @bitcast_v10f16_to_v5f32_scalar(<10 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v4, v6, v4
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -2761,9 +2663,9 @@ define inreg <5 x float> @bitcast_v10f16_to_v5f32_scalar(<10 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
+; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s20, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -2793,8 +2695,6 @@ define inreg <5 x float> @bitcast_v10f16_to_v5f32_scalar(<10 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v0, v5
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
-; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2806,9 +2706,9 @@ define inreg <5 x float> @bitcast_v10f16_to_v5f32_scalar(<10 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v4, s20, v0 op_sel_hi:[1,0]
@@ -2818,8 +2718,6 @@ define inreg <5 x float> @bitcast_v10f16_to_v5f32_scalar(<10 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2833,10 +2731,10 @@ define inreg <5 x float> @bitcast_v10f16_to_v5f32_scalar(<10 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
@@ -2845,8 +2743,6 @@ define inreg <5 x float> @bitcast_v10f16_to_v5f32_scalar(<10 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_mov_b32_e32 v4, s4
@@ -3055,7 +2951,7 @@ define inreg <10 x half> @bitcast_v10i16_to_v10f16_scalar(<10 x i16> inreg %a, i
; SI-NEXT: s_lshr_b32 s15, s17, 16
; SI-NEXT: s_lshr_b32 s23, s16, 16
; SI-NEXT: s_cmp_lg_u32 s21, 0
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s5, s20, 0xffff
; SI-NEXT: s_lshl_b32 s7, s14, 16
@@ -3130,19 +3026,12 @@ define inreg <10 x half> @bitcast_v10i16_to_v10f16_scalar(<10 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v3, s5
; SI-NEXT: v_mov_b32_e32 v4, s8
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr21
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_v10i16_to_v10f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
@@ -3178,16 +3067,14 @@ define inreg <10 x half> @bitcast_v10i16_to_v10f16_scalar(<10 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v10i16_to_v10f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
+; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
@@ -3196,8 +3083,6 @@ define inreg <10 x half> @bitcast_v10i16_to_v10f16_scalar(<10 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
-; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3214,10 +3099,10 @@ define inreg <10 x half> @bitcast_v10i16_to_v10f16_scalar(<10 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
; GFX11-NEXT: .LBB21_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
@@ -3226,8 +3111,6 @@ define inreg <10 x half> @bitcast_v10i16_to_v10f16_scalar(<10 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
-; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -3417,9 +3300,9 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s6, s17, 16
; SI-NEXT: s_lshr_b32 s10, s16, 16
; SI-NEXT: s_cmp_lg_u32 s21, 0
-; SI-NEXT: s_cbranch_scc0 .LBB23_3
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_4
+; SI-NEXT: s_cbranch_execnz .LBB23_3
; SI-NEXT: .LBB23_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s10
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -3463,10 +3346,8 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i
; SI-NEXT: v_lshr_b64 v[6:7], v[0:1], 16
; SI-NEXT: v_lshr_b64 v[4:5], v[2:3], 16
; SI-NEXT: v_or_b32_e32 v5, v14, v13
-; SI-NEXT: s_branch .LBB23_5
+; SI-NEXT: s_branch .LBB23_4
; SI-NEXT: .LBB23_3:
-; SI-NEXT: s_branch .LBB23_2
-; SI-NEXT: .LBB23_4:
; SI-NEXT: v_mov_b32_e32 v8, s8
; SI-NEXT: v_mov_b32_e32 v11, s7
; SI-NEXT: v_mov_b32_e32 v12, s6
@@ -3477,7 +3358,7 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v9, s16
; SI-NEXT: v_mov_b32_e32 v6, s10
; SI-NEXT: v_mov_b32_e32 v4, s9
-; SI-NEXT: .LBB23_5: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v9
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v6
; SI-NEXT: v_or_b32_e32 v0, v0, v2
@@ -3499,9 +3380,9 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
+; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -3531,8 +3412,6 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v5, v6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
-; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -3547,9 +3426,9 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
+; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v4, s20, v0 op_sel_hi:[1,0]
@@ -3559,8 +3438,6 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
-; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3577,10 +3454,10 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
; GFX11-NEXT: .LBB23_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
@@ -3589,8 +3466,6 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
-; GFX11-NEXT: .LBB23_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.16bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.16bit.ll
index b6b321a08f7aa..bc51639e0644b 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.16bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.16bit.ll
@@ -105,7 +105,7 @@ define inreg half @bitcast_i16_to_f16_scalar(i16 inreg %a, i32 inreg %b) {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_and_b32 s6, s16, 0xffff
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -113,14 +113,12 @@ define inreg half @bitcast_i16_to_f16_scalar(i16 inreg %a, i32 inreg %b) {
; SI-NEXT: .LBB1_3: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_i16_to_f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -128,14 +126,12 @@ define inreg half @bitcast_i16_to_f16_scalar(i16 inreg %a, i32 inreg %b) {
; VI-NEXT: .LBB1_3: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_i16_to_f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -143,8 +139,6 @@ define inreg half @bitcast_i16_to_f16_scalar(i16 inreg %a, i32 inreg %b) {
; GFX9-NEXT: .LBB1_3: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_i16_to_f16_scalar:
; GFX11: ; %bb.0:
@@ -292,19 +286,16 @@ define inreg i16 @bitcast_f16_to_i16_scalar(half inreg %a, i32 inreg %b) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s16, 0xffff
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s16
; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB3_3:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -312,16 +303,14 @@ define inreg i16 @bitcast_f16_to_i16_scalar(half inreg %a, i32 inreg %b) {
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -329,16 +318,14 @@ define inreg i16 @bitcast_f16_to_i16_scalar(half inreg %a, i32 inreg %b) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_add_f16_e32 v0, s16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -347,16 +334,14 @@ define inreg i16 @bitcast_f16_to_i16_scalar(half inreg %a, i32 inreg %b) {
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-TRUE16-NEXT: .LBB3_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f16_e64 v0.l, 0x200, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB3_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB3_2
-; GFX11-TRUE16-NEXT: .LBB3_4:
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -365,16 +350,14 @@ define inreg i16 @bitcast_f16_to_i16_scalar(half inreg %a, i32 inreg %b) {
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-FAKE16-NEXT: .LBB3_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f16_e64 v0, 0x200, s0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB3_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB3_2
-; GFX11-FAKE16-NEXT: .LBB3_4:
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -496,7 +479,7 @@ define inreg bfloat @bitcast_i16_to_bf16_scalar(i16 inreg %a, i32 inreg %b) {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_and_b32 s6, s16, 0xffff
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s7, s6, 16
; SI-NEXT: s_cbranch_execnz .LBB5_3
@@ -507,15 +490,12 @@ define inreg bfloat @bitcast_i16_to_bf16_scalar(i16 inreg %a, i32 inreg %b) {
; SI-NEXT: v_mul_f32_e64 v0, 1.0, s7
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_i16_to_bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -523,14 +503,12 @@ define inreg bfloat @bitcast_i16_to_bf16_scalar(i16 inreg %a, i32 inreg %b) {
; VI-NEXT: .LBB5_3: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_i16_to_bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -538,8 +516,6 @@ define inreg bfloat @bitcast_i16_to_bf16_scalar(i16 inreg %a, i32 inreg %b) {
; GFX9-NEXT: .LBB5_3: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_i16_to_bf16_scalar:
; GFX11: ; %bb.0:
@@ -727,7 +703,7 @@ define inreg i16 @bitcast_bf16_to_i16_scalar(bfloat inreg %a, i32 inreg %b) {
; SI-NEXT: s_lshl_b32 s4, s16, 16
; SI-NEXT: s_cmp_lg_u32 s17, 0
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s4
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v1
; SI-NEXT: s_cbranch_execnz .LBB7_3
@@ -737,17 +713,14 @@ define inreg i16 @bitcast_bf16_to_i16_scalar(bfloat inreg %a, i32 inreg %b) {
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; SI-NEXT: .LBB7_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_bf16_to_i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_3
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_4
+; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -761,8 +734,6 @@ define inreg i16 @bitcast_bf16_to_i16_scalar(bfloat inreg %a, i32 inreg %b) {
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB7_3:
-; VI-NEXT: s_branch .LBB7_2
-; VI-NEXT: .LBB7_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -770,9 +741,9 @@ define inreg i16 @bitcast_bf16_to_i16_scalar(bfloat inreg %a, i32 inreg %b) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_4
+; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
; GFX9-NEXT: s_lshl_b32 s4, s16, 16
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -786,8 +757,6 @@ define inreg i16 @bitcast_bf16_to_i16_scalar(bfloat inreg %a, i32 inreg %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB7_3:
-; GFX9-NEXT: s_branch .LBB7_2
-; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -796,10 +765,10 @@ define inreg i16 @bitcast_bf16_to_i16_scalar(bfloat inreg %a, i32 inreg %b) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
; GFX11-NEXT: .LBB7_2: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s0, s0, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -815,8 +784,6 @@ define inreg i16 @bitcast_bf16_to_i16_scalar(bfloat inreg %a, i32 inreg %b) {
; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: s_branch .LBB7_2
-; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -944,22 +911,19 @@ define inreg bfloat @bitcast_f16_to_bf16_scalar(half inreg %a, i32 inreg %b) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_3
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s6, s16, 16
-; SI-NEXT: s_cbranch_execnz .LBB9_4
+; SI-NEXT: s_cbranch_execnz .LBB9_3
; SI-NEXT: .LBB9_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s16
; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: s_branch .LBB9_5
+; SI-NEXT: s_branch .LBB9_4
; SI-NEXT: .LBB9_3:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB9_2
-; SI-NEXT: .LBB9_4:
; SI-NEXT: v_mov_b32_e32 v0, s6
-; SI-NEXT: .LBB9_5: ; %end
+; SI-NEXT: .LBB9_4: ; %end
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -968,16 +932,14 @@ define inreg bfloat @bitcast_f16_to_bf16_scalar(half inreg %a, i32 inreg %b) {
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_3
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_4
+; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB9_3:
-; VI-NEXT: s_branch .LBB9_2
-; VI-NEXT: .LBB9_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -985,16 +947,14 @@ define inreg bfloat @bitcast_f16_to_bf16_scalar(half inreg %a, i32 inreg %b) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_4
+; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_add_f16_e32 v0, s16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB9_3:
-; GFX9-NEXT: s_branch .LBB9_2
-; GFX9-NEXT: .LBB9_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -1003,16 +963,14 @@ define inreg bfloat @bitcast_f16_to_bf16_scalar(half inreg %a, i32 inreg %b) {
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB9_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB9_3
; GFX11-TRUE16-NEXT: .LBB9_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f16_e64 v0.l, 0x200, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB9_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB9_2
-; GFX11-TRUE16-NEXT: .LBB9_4:
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -1021,16 +979,14 @@ define inreg bfloat @bitcast_f16_to_bf16_scalar(half inreg %a, i32 inreg %b) {
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB9_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB9_3
; GFX11-FAKE16-NEXT: .LBB9_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f16_e64 v0, 0x200, s0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB9_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB9_2
-; GFX11-FAKE16-NEXT: .LBB9_4:
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -1201,7 +1157,7 @@ define inreg half @bitcast_bf16_to_f16_scalar(bfloat inreg %a, i32 inreg %b) {
; SI-NEXT: s_lshl_b32 s4, s16, 16
; SI-NEXT: s_cmp_lg_u32 s17, 0
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s4
-; SI-NEXT: s_cbranch_scc0 .LBB11_4
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v1
; SI-NEXT: s_cbranch_execnz .LBB11_3
@@ -1211,17 +1167,14 @@ define inreg half @bitcast_bf16_to_f16_scalar(bfloat inreg %a, i32 inreg %b) {
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; SI-NEXT: .LBB11_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_4:
-; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: s_branch .LBB11_2
;
; VI-LABEL: bitcast_bf16_to_f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -1235,8 +1188,6 @@ define inreg half @bitcast_bf16_to_f16_scalar(bfloat inreg %a, i32 inreg %b) {
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1244,9 +1195,9 @@ define inreg half @bitcast_bf16_to_f16_scalar(bfloat inreg %a, i32 inreg %b) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: s_lshl_b32 s4, s16, 16
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -1260,8 +1211,6 @@ define inreg half @bitcast_bf16_to_f16_scalar(bfloat inreg %a, i32 inreg %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -1270,10 +1219,10 @@ define inreg half @bitcast_bf16_to_f16_scalar(bfloat inreg %a, i32 inreg %b) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s0, s0, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -1289,8 +1238,6 @@ define inreg half @bitcast_bf16_to_f16_scalar(bfloat inreg %a, i32 inreg %b) {
; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.192bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.192bit.ll
index d463b115d1088..10c48f1da4e6e 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.192bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.192bit.ll
@@ -101,7 +101,7 @@ define inreg <6 x float> @bitcast_v6i32_to_v6f32_scalar(<6 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -119,14 +119,12 @@ define inreg <6 x float> @bitcast_v6i32_to_v6f32_scalar(<6 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v6i32_to_v6f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -144,14 +142,12 @@ define inreg <6 x float> @bitcast_v6i32_to_v6f32_scalar(<6 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v6i32_to_v6f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -169,15 +165,13 @@ define inreg <6 x float> @bitcast_v6i32_to_v6f32_scalar(<6 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v6i32_to_v6f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -194,8 +188,6 @@ define inreg <6 x float> @bitcast_v6i32_to_v6f32_scalar(<6 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -305,9 +297,9 @@ define inreg <6 x i32> @bitcast_v6f32_to_v6i32_scalar(<6 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
; SI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -317,8 +309,6 @@ define inreg <6 x i32> @bitcast_v6f32_to_v6i32_scalar(<6 x float> inreg %a, i32
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -331,9 +321,9 @@ define inreg <6 x i32> @bitcast_v6f32_to_v6i32_scalar(<6 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -343,8 +333,6 @@ define inreg <6 x i32> @bitcast_v6f32_to_v6i32_scalar(<6 x float> inreg %a, i32
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -357,9 +345,9 @@ define inreg <6 x i32> @bitcast_v6f32_to_v6i32_scalar(<6 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -369,8 +357,6 @@ define inreg <6 x i32> @bitcast_v6f32_to_v6i32_scalar(<6 x float> inreg %a, i32
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -386,10 +372,10 @@ define inreg <6 x i32> @bitcast_v6f32_to_v6i32_scalar(<6 x float> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
@@ -399,8 +385,6 @@ define inreg <6 x i32> @bitcast_v6f32_to_v6i32_scalar(<6 x float> inreg %a, i32
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -517,7 +501,7 @@ define inreg <3 x i64> @bitcast_v6i32_to_v3i64_scalar(<6 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB5_3
; SI-NEXT: .LBB5_2: ; %cmp.true
@@ -535,14 +519,12 @@ define inreg <3 x i64> @bitcast_v6i32_to_v3i64_scalar(<6 x i32> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v6i32_to_v3i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -560,14 +542,12 @@ define inreg <3 x i64> @bitcast_v6i32_to_v3i64_scalar(<6 x i32> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v6i32_to_v3i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -585,15 +565,13 @@ define inreg <3 x i64> @bitcast_v6i32_to_v3i64_scalar(<6 x i32> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v6i32_to_v3i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -610,8 +588,6 @@ define inreg <3 x i64> @bitcast_v6i32_to_v3i64_scalar(<6 x i32> inreg %a, i32 in
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -726,7 +702,7 @@ define inreg <6 x i32> @bitcast_v3i64_to_v6i32_scalar(<3 x i64> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB7_3
; SI-NEXT: .LBB7_2: ; %cmp.true
@@ -744,14 +720,12 @@ define inreg <6 x i32> @bitcast_v3i64_to_v6i32_scalar(<3 x i64> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v3i64_to_v6i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -769,14 +743,12 @@ define inreg <6 x i32> @bitcast_v3i64_to_v6i32_scalar(<3 x i64> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v3i64_to_v6i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
@@ -794,15 +766,13 @@ define inreg <6 x i32> @bitcast_v3i64_to_v6i32_scalar(<3 x i64> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v3i64_to_v6i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
@@ -819,8 +789,6 @@ define inreg <6 x i32> @bitcast_v3i64_to_v6i32_scalar(<3 x i64> inreg %a, i32 in
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -933,7 +901,7 @@ define inreg <3 x double> @bitcast_v6i32_to_v3f64_scalar(<6 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB9_3
; SI-NEXT: .LBB9_2: ; %cmp.true
@@ -951,14 +919,12 @@ define inreg <3 x double> @bitcast_v6i32_to_v3f64_scalar(<6 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v6i32_to_v3f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -976,14 +942,12 @@ define inreg <3 x double> @bitcast_v6i32_to_v3f64_scalar(<6 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v6i32_to_v3f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -1001,15 +965,13 @@ define inreg <3 x double> @bitcast_v6i32_to_v3f64_scalar(<6 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v6i32_to_v3f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -1026,8 +988,6 @@ define inreg <3 x double> @bitcast_v6i32_to_v3f64_scalar(<6 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1131,17 +1091,15 @@ define inreg <6 x i32> @bitcast_v3f64_to_v6i32_scalar(<3 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -1154,17 +1112,15 @@ define inreg <6 x i32> @bitcast_v3f64_to_v6i32_scalar(<3 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1177,17 +1133,15 @@ define inreg <6 x i32> @bitcast_v3f64_to_v6i32_scalar(<3 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1203,18 +1157,16 @@ define inreg <6 x i32> @bitcast_v3f64_to_v6i32_scalar(<3 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -1370,7 +1322,7 @@ define inreg <12 x i16> @bitcast_v6i32_to_v12i16_scalar(<6 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB13_4
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s12, s21, 16
; SI-NEXT: s_lshr_b32 s13, s19, 16
@@ -1418,20 +1370,12 @@ define inreg <12 x i16> @bitcast_v6i32_to_v12i16_scalar(<6 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v4, s4
; SI-NEXT: v_mov_b32_e32 v5, s9
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB13_4:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: s_branch .LBB13_2
;
; VI-LABEL: bitcast_v6i32_to_v12i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
@@ -1449,14 +1393,12 @@ define inreg <12 x i16> @bitcast_v6i32_to_v12i16_scalar(<6 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v6i32_to_v12i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
@@ -1474,15 +1416,13 @@ define inreg <12 x i16> @bitcast_v6i32_to_v12i16_scalar(<6 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v6i32_to_v12i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
@@ -1499,8 +1439,6 @@ define inreg <12 x i16> @bitcast_v6i32_to_v12i16_scalar(<6 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1703,7 +1641,7 @@ define inreg <6 x i32> @bitcast_v12i16_to_v6i32_scalar(<12 x i16> inreg %a, i32
; SI-NEXT: s_lshr_b32 s23, s17, 16
; SI-NEXT: s_lshr_b32 s24, s16, 16
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s24, 16
@@ -1763,15 +1701,12 @@ define inreg <6 x i32> @bitcast_v12i16_to_v6i32_scalar(<12 x i16> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v4, s8
; SI-NEXT: v_mov_b32_e32 v5, s9
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v12i16_to_v6i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB15_3
; VI-NEXT: .LBB15_2: ; %cmp.true
@@ -1813,16 +1748,14 @@ define inreg <6 x i32> @bitcast_v12i16_to_v6i32_scalar(<12 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v12i16_to_v6i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
@@ -1832,8 +1765,6 @@ define inreg <6 x i32> @bitcast_v12i16_to_v6i32_scalar(<12 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1849,10 +1780,10 @@ define inreg <6 x i32> @bitcast_v12i16_to_v6i32_scalar(<12 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
@@ -1862,8 +1793,6 @@ define inreg <6 x i32> @bitcast_v12i16_to_v6i32_scalar(<12 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -2019,7 +1948,7 @@ define inreg <12 x half> @bitcast_v6i32_to_v12f16_scalar(<6 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s12, s21, 16
; SI-NEXT: s_lshr_b32 s13, s19, 16
@@ -2067,20 +1996,12 @@ define inreg <12 x half> @bitcast_v6i32_to_v12f16_scalar(<6 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v4, s4
; SI-NEXT: v_mov_b32_e32 v5, s9
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_v6i32_to_v12f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
@@ -2098,14 +2019,12 @@ define inreg <12 x half> @bitcast_v6i32_to_v12f16_scalar(<6 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v6i32_to_v12f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
@@ -2123,15 +2042,13 @@ define inreg <12 x half> @bitcast_v6i32_to_v12f16_scalar(<6 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v6i32_to_v12f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
@@ -2148,8 +2065,6 @@ define inreg <12 x half> @bitcast_v6i32_to_v12f16_scalar(<6 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2376,7 +2291,7 @@ define inreg <6 x i32> @bitcast_v12f16_to_v6i32_scalar(<12 x half> inreg %a, i32
; SI-NEXT: s_lshr_b32 s23, s17, 16
; SI-NEXT: s_lshr_b32 s24, s16, 16
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s24, 16
@@ -2396,7 +2311,7 @@ define inreg <6 x i32> @bitcast_v12f16_to_v6i32_scalar(<12 x half> inreg %a, i32
; SI-NEXT: s_and_b32 s9, s21, 0xffff
; SI-NEXT: s_lshl_b32 s10, s12, 16
; SI-NEXT: s_or_b32 s9, s9, s10
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s24
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -2448,9 +2363,6 @@ define inreg <6 x i32> @bitcast_v12f16_to_v6i32_scalar(<12 x half> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v5, v7, v5
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -2463,9 +2375,9 @@ define inreg <6 x i32> @bitcast_v12f16_to_v6i32_scalar(<12 x half> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
+; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s21, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -2500,8 +2412,6 @@ define inreg <6 x i32> @bitcast_v12f16_to_v6i32_scalar(<12 x half> inreg %a, i32
; VI-NEXT: v_or_b32_e32 v0, v0, v6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
-; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2514,9 +2424,9 @@ define inreg <6 x i32> @bitcast_v12f16_to_v6i32_scalar(<12 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v5, s21, v0 op_sel_hi:[1,0]
@@ -2527,8 +2437,6 @@ define inreg <6 x i32> @bitcast_v12f16_to_v6i32_scalar(<12 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2544,10 +2452,10 @@ define inreg <6 x i32> @bitcast_v12f16_to_v6i32_scalar(<12 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
@@ -2557,8 +2465,6 @@ define inreg <6 x i32> @bitcast_v12f16_to_v6i32_scalar(<12 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -2672,9 +2578,9 @@ define inreg <3 x i64> @bitcast_v6f32_to_v3i64_scalar(<6 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB21_3
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_4
+; SI-NEXT: s_cbranch_execnz .LBB21_3
; SI-NEXT: .LBB21_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
; SI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -2684,8 +2590,6 @@ define inreg <3 x i64> @bitcast_v6f32_to_v3i64_scalar(<6 x float> inreg %a, i32
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB21_3:
-; SI-NEXT: s_branch .LBB21_2
-; SI-NEXT: .LBB21_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -2698,9 +2602,9 @@ define inreg <3 x i64> @bitcast_v6f32_to_v3i64_scalar(<6 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_3
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_4
+; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -2710,8 +2614,6 @@ define inreg <3 x i64> @bitcast_v6f32_to_v3i64_scalar(<6 x float> inreg %a, i32
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB21_3:
-; VI-NEXT: s_branch .LBB21_2
-; VI-NEXT: .LBB21_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2724,9 +2626,9 @@ define inreg <3 x i64> @bitcast_v6f32_to_v3i64_scalar(<6 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
+; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -2736,8 +2638,6 @@ define inreg <3 x i64> @bitcast_v6f32_to_v3i64_scalar(<6 x float> inreg %a, i32
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
-; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2753,10 +2653,10 @@ define inreg <3 x i64> @bitcast_v6f32_to_v3i64_scalar(<6 x float> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
; GFX11-NEXT: .LBB21_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
@@ -2766,8 +2666,6 @@ define inreg <3 x i64> @bitcast_v6f32_to_v3i64_scalar(<6 x float> inreg %a, i32
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
-; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -2886,7 +2784,7 @@ define inreg <6 x float> @bitcast_v3i64_to_v6f32_scalar(<3 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB23_3
; SI-NEXT: .LBB23_2: ; %cmp.true
@@ -2904,14 +2802,12 @@ define inreg <6 x float> @bitcast_v3i64_to_v6f32_scalar(<3 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v3i64_to_v6f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
@@ -2929,14 +2825,12 @@ define inreg <6 x float> @bitcast_v3i64_to_v6f32_scalar(<3 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v3i64_to_v6f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
@@ -2954,15 +2848,13 @@ define inreg <6 x float> @bitcast_v3i64_to_v6f32_scalar(<3 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v3i64_to_v6f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
@@ -2979,8 +2871,6 @@ define inreg <6 x float> @bitcast_v3i64_to_v6f32_scalar(<3 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: s_branch .LBB23_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3090,9 +2980,9 @@ define inreg <3 x double> @bitcast_v6f32_to_v3f64_scalar(<6 x float> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB25_3
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_4
+; SI-NEXT: s_cbranch_execnz .LBB25_3
; SI-NEXT: .LBB25_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
; SI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -3102,8 +2992,6 @@ define inreg <3 x double> @bitcast_v6f32_to_v3f64_scalar(<6 x float> inreg %a, i
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB25_3:
-; SI-NEXT: s_branch .LBB25_2
-; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -3116,9 +3004,9 @@ define inreg <3 x double> @bitcast_v6f32_to_v3f64_scalar(<6 x float> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
+; VI-NEXT: s_cbranch_execnz .LBB25_3
; VI-NEXT: .LBB25_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -3128,8 +3016,6 @@ define inreg <3 x double> @bitcast_v6f32_to_v3f64_scalar(<6 x float> inreg %a, i
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
-; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -3142,9 +3028,9 @@ define inreg <3 x double> @bitcast_v6f32_to_v3f64_scalar(<6 x float> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
+; GFX9-NEXT: s_cbranch_execnz .LBB25_3
; GFX9-NEXT: .LBB25_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -3154,8 +3040,6 @@ define inreg <3 x double> @bitcast_v6f32_to_v3f64_scalar(<6 x float> inreg %a, i
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
-; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3171,10 +3055,10 @@ define inreg <3 x double> @bitcast_v6f32_to_v3f64_scalar(<6 x float> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_3
; GFX11-NEXT: .LBB25_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
@@ -3184,8 +3068,6 @@ define inreg <3 x double> @bitcast_v6f32_to_v3f64_scalar(<6 x float> inreg %a, i
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
-; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -3293,17 +3175,15 @@ define inreg <6 x float> @bitcast_v3f64_to_v6f32_scalar(<3 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB27_3
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_4
+; SI-NEXT: s_cbranch_execnz .LBB27_3
; SI-NEXT: .LBB27_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB27_3:
-; SI-NEXT: s_branch .LBB27_2
-; SI-NEXT: .LBB27_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -3316,17 +3196,15 @@ define inreg <6 x float> @bitcast_v3f64_to_v6f32_scalar(<3 x double> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB27_3
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_4
+; VI-NEXT: s_cbranch_execnz .LBB27_3
; VI-NEXT: .LBB27_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB27_3:
-; VI-NEXT: s_branch .LBB27_2
-; VI-NEXT: .LBB27_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -3339,17 +3217,15 @@ define inreg <6 x float> @bitcast_v3f64_to_v6f32_scalar(<3 x double> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_4
+; GFX9-NEXT: s_cbranch_execnz .LBB27_3
; GFX9-NEXT: .LBB27_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB27_3:
-; GFX9-NEXT: s_branch .LBB27_2
-; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3365,18 +3241,16 @@ define inreg <6 x float> @bitcast_v3f64_to_v6f32_scalar(<3 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_3
; GFX11-NEXT: .LBB27_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
-; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -3529,7 +3403,7 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s14, s21, 16
; SI-NEXT: s_lshr_b32 s13, s19, 16
@@ -3537,7 +3411,7 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3
; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB29_4
+; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
; SI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -3551,16 +3425,8 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v1
-; SI-NEXT: s_branch .LBB29_5
+; SI-NEXT: s_branch .LBB29_4
; SI-NEXT: .LBB29_3:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: s_branch .LBB29_2
-; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -3573,7 +3439,7 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v8, s8
; SI-NEXT: v_mov_b32_e32 v7, s6
; SI-NEXT: v_mov_b32_e32 v6, s4
-; SI-NEXT: .LBB29_5: ; %end
+; SI-NEXT: .LBB29_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -3598,9 +3464,9 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
+; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -3610,8 +3476,6 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
-; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -3626,9 +3490,9 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
+; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -3638,8 +3502,6 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
-; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3657,10 +3519,10 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-NEXT: .LBB29_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
@@ -3670,8 +3532,6 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
-; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -3879,7 +3739,7 @@ define inreg <6 x float> @bitcast_v12i16_to_v6f32_scalar(<12 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s23, s17, 16
; SI-NEXT: s_lshr_b32 s24, s16, 16
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s24, 16
@@ -3939,15 +3799,12 @@ define inreg <6 x float> @bitcast_v12i16_to_v6f32_scalar(<12 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v4, s8
; SI-NEXT: v_mov_b32_e32 v5, s9
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v12i16_to_v6f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB31_3
; VI-NEXT: .LBB31_2: ; %cmp.true
@@ -3989,16 +3846,14 @@ define inreg <6 x float> @bitcast_v12i16_to_v6f32_scalar(<12 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v12i16_to_v6f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
+; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
@@ -4008,8 +3863,6 @@ define inreg <6 x float> @bitcast_v12i16_to_v6f32_scalar(<12 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
-; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -4025,10 +3878,10 @@ define inreg <6 x float> @bitcast_v12i16_to_v6f32_scalar(<12 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-NEXT: .LBB31_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
@@ -4038,8 +3891,6 @@ define inreg <6 x float> @bitcast_v12i16_to_v6f32_scalar(<12 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
-; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -4192,7 +4043,7 @@ define inreg <12 x half> @bitcast_v6f32_to_v12f16_scalar(<6 x float> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s14, s21, 16
; SI-NEXT: s_lshr_b32 s13, s19, 16
@@ -4200,7 +4051,7 @@ define inreg <12 x half> @bitcast_v6f32_to_v12f16_scalar(<6 x float> inreg %a, i
; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB33_4
+; SI-NEXT: s_cbranch_execnz .LBB33_3
; SI-NEXT: .LBB33_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
; SI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -4214,16 +4065,8 @@ define inreg <12 x half> @bitcast_v6f32_to_v12f16_scalar(<6 x float> inreg %a, i
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v1
-; SI-NEXT: s_branch .LBB33_5
+; SI-NEXT: s_branch .LBB33_4
; SI-NEXT: .LBB33_3:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: s_branch .LBB33_2
-; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -4236,7 +4079,7 @@ define inreg <12 x half> @bitcast_v6f32_to_v12f16_scalar(<6 x float> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v8, s8
; SI-NEXT: v_mov_b32_e32 v7, s6
; SI-NEXT: v_mov_b32_e32 v6, s4
-; SI-NEXT: .LBB33_5: ; %end
+; SI-NEXT: .LBB33_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -4261,9 +4104,9 @@ define inreg <12 x half> @bitcast_v6f32_to_v12f16_scalar(<6 x float> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_4
+; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -4273,8 +4116,6 @@ define inreg <12 x half> @bitcast_v6f32_to_v12f16_scalar(<6 x float> inreg %a, i
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB33_3:
-; VI-NEXT: s_branch .LBB33_2
-; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -4289,9 +4130,9 @@ define inreg <12 x half> @bitcast_v6f32_to_v12f16_scalar(<6 x float> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
+; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -4301,8 +4142,6 @@ define inreg <12 x half> @bitcast_v6f32_to_v12f16_scalar(<6 x float> inreg %a, i
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: s_branch .LBB33_2
-; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -4320,10 +4159,10 @@ define inreg <12 x half> @bitcast_v6f32_to_v12f16_scalar(<6 x float> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
@@ -4333,8 +4172,6 @@ define inreg <12 x half> @bitcast_v6f32_to_v12f16_scalar(<6 x float> inreg %a, i
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
-; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -4566,7 +4403,7 @@ define inreg <6 x float> @bitcast_v12f16_to_v6f32_scalar(<12 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s23, s17, 16
; SI-NEXT: s_lshr_b32 s24, s16, 16
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB35_3
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s24, 16
@@ -4586,7 +4423,7 @@ define inreg <6 x float> @bitcast_v12f16_to_v6f32_scalar(<12 x half> inreg %a, i
; SI-NEXT: s_and_b32 s9, s21, 0xffff
; SI-NEXT: s_lshl_b32 s10, s12, 16
; SI-NEXT: s_or_b32 s9, s9, s10
-; SI-NEXT: s_cbranch_execnz .LBB35_4
+; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s24
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -4638,9 +4475,6 @@ define inreg <6 x float> @bitcast_v12f16_to_v6f32_scalar(<12 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v5, v7, v5
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB35_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9
-; SI-NEXT: s_branch .LBB35_2
-; SI-NEXT: .LBB35_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -4653,9 +4487,9 @@ define inreg <6 x float> @bitcast_v12f16_to_v6f32_scalar(<12 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_4
+; VI-NEXT: s_cbranch_execnz .LBB35_3
; VI-NEXT: .LBB35_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s21, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -4690,8 +4524,6 @@ define inreg <6 x float> @bitcast_v12f16_to_v6f32_scalar(<12 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v0, v6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB35_3:
-; VI-NEXT: s_branch .LBB35_2
-; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -4704,9 +4536,9 @@ define inreg <6 x float> @bitcast_v12f16_to_v6f32_scalar(<12 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
+; GFX9-NEXT: s_cbranch_execnz .LBB35_3
; GFX9-NEXT: .LBB35_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v5, s21, v0 op_sel_hi:[1,0]
@@ -4717,8 +4549,6 @@ define inreg <6 x float> @bitcast_v12f16_to_v6f32_scalar(<12 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
-; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -4734,10 +4564,10 @@ define inreg <6 x float> @bitcast_v12f16_to_v6f32_scalar(<12 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-NEXT: .LBB35_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
@@ -4747,8 +4577,6 @@ define inreg <6 x float> @bitcast_v12f16_to_v6f32_scalar(<12 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
-; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -4867,7 +4695,7 @@ define inreg <3 x double> @bitcast_v3i64_to_v3f64_scalar(<3 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB37_3
; SI-NEXT: .LBB37_2: ; %cmp.true
@@ -4885,14 +4713,12 @@ define inreg <3 x double> @bitcast_v3i64_to_v3f64_scalar(<3 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v3i64_to_v3f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB37_3
; VI-NEXT: .LBB37_2: ; %cmp.true
@@ -4910,14 +4736,12 @@ define inreg <3 x double> @bitcast_v3i64_to_v3f64_scalar(<3 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v3i64_to_v3f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB37_3
; GFX9-NEXT: .LBB37_2: ; %cmp.true
@@ -4935,15 +4759,13 @@ define inreg <3 x double> @bitcast_v3i64_to_v3f64_scalar(<3 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v3i64_to_v3f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
@@ -4959,8 +4781,6 @@ define inreg <3 x double> @bitcast_v3i64_to_v3f64_scalar(<3 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_4:
-; GFX11-NEXT: s_branch .LBB37_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5064,17 +4884,15 @@ define inreg <3 x i64> @bitcast_v3f64_to_v3i64_scalar(<3 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB39_3
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_4
+; SI-NEXT: s_cbranch_execnz .LBB39_3
; SI-NEXT: .LBB39_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB39_3:
-; SI-NEXT: s_branch .LBB39_2
-; SI-NEXT: .LBB39_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -5087,17 +4905,15 @@ define inreg <3 x i64> @bitcast_v3f64_to_v3i64_scalar(<3 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB39_3
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_4
+; VI-NEXT: s_cbranch_execnz .LBB39_3
; VI-NEXT: .LBB39_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB39_3:
-; VI-NEXT: s_branch .LBB39_2
-; VI-NEXT: .LBB39_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -5110,17 +4926,15 @@ define inreg <3 x i64> @bitcast_v3f64_to_v3i64_scalar(<3 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
+; GFX9-NEXT: s_cbranch_execnz .LBB39_3
; GFX9-NEXT: .LBB39_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
-; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -5136,18 +4950,16 @@ define inreg <3 x i64> @bitcast_v3f64_to_v3i64_scalar(<3 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_3
; GFX11-NEXT: .LBB39_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
-; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -5305,7 +5117,7 @@ define inreg <12 x i16> @bitcast_v3i64_to_v12i16_scalar(<3 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB41_4
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s12, s21, 16
; SI-NEXT: s_lshr_b32 s13, s19, 16
@@ -5353,20 +5165,12 @@ define inreg <12 x i16> @bitcast_v3i64_to_v12i16_scalar(<3 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v4, s4
; SI-NEXT: v_mov_b32_e32 v5, s9
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB41_4:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: s_branch .LBB41_2
;
; VI-LABEL: bitcast_v3i64_to_v12i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB41_4
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB41_3
; VI-NEXT: .LBB41_2: ; %cmp.true
@@ -5384,14 +5188,12 @@ define inreg <12 x i16> @bitcast_v3i64_to_v12i16_scalar(<3 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_4:
-; VI-NEXT: s_branch .LBB41_2
;
; GFX9-LABEL: bitcast_v3i64_to_v12i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB41_3
; GFX9-NEXT: .LBB41_2: ; %cmp.true
@@ -5409,15 +5211,13 @@ define inreg <12 x i16> @bitcast_v3i64_to_v12i16_scalar(<3 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_4:
-; GFX9-NEXT: s_branch .LBB41_2
;
; GFX11-LABEL: bitcast_v3i64_to_v12i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
@@ -5434,8 +5234,6 @@ define inreg <12 x i16> @bitcast_v3i64_to_v12i16_scalar(<3 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_4:
-; GFX11-NEXT: s_branch .LBB41_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5638,7 +5436,7 @@ define inreg <3 x i64> @bitcast_v12i16_to_v3i64_scalar(<12 x i16> inreg %a, i32
; SI-NEXT: s_lshr_b32 s23, s17, 16
; SI-NEXT: s_lshr_b32 s24, s16, 16
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB43_4
+; SI-NEXT: s_cbranch_scc0 .LBB43_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s24, 16
@@ -5698,15 +5496,12 @@ define inreg <3 x i64> @bitcast_v12i16_to_v3i64_scalar(<12 x i16> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v4, s8
; SI-NEXT: v_mov_b32_e32 v5, s9
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB43_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9
-; SI-NEXT: s_branch .LBB43_2
;
; VI-LABEL: bitcast_v12i16_to_v3i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB43_4
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB43_3
; VI-NEXT: .LBB43_2: ; %cmp.true
@@ -5748,16 +5543,14 @@ define inreg <3 x i64> @bitcast_v12i16_to_v3i64_scalar(<12 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_4:
-; VI-NEXT: s_branch .LBB43_2
;
; GFX9-LABEL: bitcast_v12i16_to_v3i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
+; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
@@ -5767,8 +5560,6 @@ define inreg <3 x i64> @bitcast_v12i16_to_v3i64_scalar(<12 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
-; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -5784,10 +5575,10 @@ define inreg <3 x i64> @bitcast_v12i16_to_v3i64_scalar(<12 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-NEXT: .LBB43_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
@@ -5797,8 +5588,6 @@ define inreg <3 x i64> @bitcast_v12i16_to_v3i64_scalar(<12 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
-; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -5956,7 +5745,7 @@ define inreg <12 x half> @bitcast_v3i64_to_v12f16_scalar(<3 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB45_4
+; SI-NEXT: s_cbranch_scc0 .LBB45_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s12, s21, 16
; SI-NEXT: s_lshr_b32 s13, s19, 16
@@ -6004,20 +5793,12 @@ define inreg <12 x half> @bitcast_v3i64_to_v12f16_scalar(<3 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v4, s4
; SI-NEXT: v_mov_b32_e32 v5, s9
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB45_4:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: s_branch .LBB45_2
;
; VI-LABEL: bitcast_v3i64_to_v12f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB45_4
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB45_3
; VI-NEXT: .LBB45_2: ; %cmp.true
@@ -6035,14 +5816,12 @@ define inreg <12 x half> @bitcast_v3i64_to_v12f16_scalar(<3 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_4:
-; VI-NEXT: s_branch .LBB45_2
;
; GFX9-LABEL: bitcast_v3i64_to_v12f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB45_3
; GFX9-NEXT: .LBB45_2: ; %cmp.true
@@ -6060,15 +5839,13 @@ define inreg <12 x half> @bitcast_v3i64_to_v12f16_scalar(<3 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_4:
-; GFX9-NEXT: s_branch .LBB45_2
;
; GFX11-LABEL: bitcast_v3i64_to_v12f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
@@ -6085,8 +5862,6 @@ define inreg <12 x half> @bitcast_v3i64_to_v12f16_scalar(<3 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_4:
-; GFX11-NEXT: s_branch .LBB45_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6313,7 +6088,7 @@ define inreg <3 x i64> @bitcast_v12f16_to_v3i64_scalar(<12 x half> inreg %a, i32
; SI-NEXT: s_lshr_b32 s23, s17, 16
; SI-NEXT: s_lshr_b32 s24, s16, 16
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB47_3
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s24, 16
@@ -6333,7 +6108,7 @@ define inreg <3 x i64> @bitcast_v12f16_to_v3i64_scalar(<12 x half> inreg %a, i32
; SI-NEXT: s_and_b32 s9, s21, 0xffff
; SI-NEXT: s_lshl_b32 s10, s12, 16
; SI-NEXT: s_or_b32 s9, s9, s10
-; SI-NEXT: s_cbranch_execnz .LBB47_4
+; SI-NEXT: s_cbranch_execnz .LBB47_3
; SI-NEXT: .LBB47_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s24
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -6385,9 +6160,6 @@ define inreg <3 x i64> @bitcast_v12f16_to_v3i64_scalar(<12 x half> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v5, v7, v5
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB47_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9
-; SI-NEXT: s_branch .LBB47_2
-; SI-NEXT: .LBB47_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -6400,9 +6172,9 @@ define inreg <3 x i64> @bitcast_v12f16_to_v3i64_scalar(<12 x half> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
+; VI-NEXT: s_cbranch_execnz .LBB47_3
; VI-NEXT: .LBB47_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s21, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -6437,8 +6209,6 @@ define inreg <3 x i64> @bitcast_v12f16_to_v3i64_scalar(<12 x half> inreg %a, i32
; VI-NEXT: v_or_b32_e32 v0, v0, v6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
-; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -6451,9 +6221,9 @@ define inreg <3 x i64> @bitcast_v12f16_to_v3i64_scalar(<12 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
+; GFX9-NEXT: s_cbranch_execnz .LBB47_3
; GFX9-NEXT: .LBB47_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v5, s21, v0 op_sel_hi:[1,0]
@@ -6464,8 +6234,6 @@ define inreg <3 x i64> @bitcast_v12f16_to_v3i64_scalar(<12 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
-; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -6481,10 +6249,10 @@ define inreg <3 x i64> @bitcast_v12f16_to_v3i64_scalar(<12 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-NEXT: .LBB47_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
@@ -6494,8 +6262,6 @@ define inreg <3 x i64> @bitcast_v12f16_to_v3i64_scalar(<12 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -6642,7 +6408,7 @@ define inreg <12 x i16> @bitcast_v3f64_to_v12i16_scalar(<3 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB49_3
+; SI-NEXT: s_cbranch_scc0 .LBB49_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s14, s21, 16
; SI-NEXT: s_lshr_b32 s13, s19, 16
@@ -6650,7 +6416,7 @@ define inreg <12 x i16> @bitcast_v3f64_to_v12i16_scalar(<3 x double> inreg %a, i
; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB49_4
+; SI-NEXT: s_cbranch_execnz .LBB49_3
; SI-NEXT: .LBB49_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
@@ -6661,16 +6427,8 @@ define inreg <12 x i16> @bitcast_v3f64_to_v12i16_scalar(<3 x double> inreg %a, i
; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v1
-; SI-NEXT: s_branch .LBB49_5
+; SI-NEXT: s_branch .LBB49_4
; SI-NEXT: .LBB49_3:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: s_branch .LBB49_2
-; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v4, s20
@@ -6683,7 +6441,7 @@ define inreg <12 x i16> @bitcast_v3f64_to_v12i16_scalar(<3 x double> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v8, s8
; SI-NEXT: v_mov_b32_e32 v7, s6
; SI-NEXT: v_mov_b32_e32 v6, s4
-; SI-NEXT: .LBB49_5: ; %end
+; SI-NEXT: .LBB49_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -6708,17 +6466,15 @@ define inreg <12 x i16> @bitcast_v3f64_to_v12i16_scalar(<3 x double> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB49_3
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB49_4
+; VI-NEXT: s_cbranch_execnz .LBB49_3
; VI-NEXT: .LBB49_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB49_3:
-; VI-NEXT: s_branch .LBB49_2
-; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -6733,17 +6489,15 @@ define inreg <12 x i16> @bitcast_v3f64_to_v12i16_scalar(<3 x double> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
+; GFX9-NEXT: s_cbranch_execnz .LBB49_3
; GFX9-NEXT: .LBB49_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: s_branch .LBB49_2
-; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -6761,18 +6515,16 @@ define inreg <12 x i16> @bitcast_v3f64_to_v12i16_scalar(<3 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: s_branch .LBB49_2
-; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -6980,7 +6732,7 @@ define inreg <3 x double> @bitcast_v12i16_to_v3f64_scalar(<12 x i16> inreg %a, i
; SI-NEXT: s_lshr_b32 s23, s17, 16
; SI-NEXT: s_lshr_b32 s24, s16, 16
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB51_4
+; SI-NEXT: s_cbranch_scc0 .LBB51_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s24, 16
@@ -7040,15 +6792,12 @@ define inreg <3 x double> @bitcast_v12i16_to_v3f64_scalar(<12 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v4, s8
; SI-NEXT: v_mov_b32_e32 v5, s9
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB51_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9
-; SI-NEXT: s_branch .LBB51_2
;
; VI-LABEL: bitcast_v12i16_to_v3f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB51_4
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB51_3
; VI-NEXT: .LBB51_2: ; %cmp.true
@@ -7090,16 +6839,14 @@ define inreg <3 x double> @bitcast_v12i16_to_v3f64_scalar(<12 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_4:
-; VI-NEXT: s_branch .LBB51_2
;
; GFX9-LABEL: bitcast_v12i16_to_v3f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_4
+; GFX9-NEXT: s_cbranch_execnz .LBB51_3
; GFX9-NEXT: .LBB51_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
@@ -7109,8 +6856,6 @@ define inreg <3 x double> @bitcast_v12i16_to_v3f64_scalar(<12 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB51_3:
-; GFX9-NEXT: s_branch .LBB51_2
-; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -7126,10 +6871,10 @@ define inreg <3 x double> @bitcast_v12i16_to_v3f64_scalar(<12 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-NEXT: .LBB51_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
@@ -7139,8 +6884,6 @@ define inreg <3 x double> @bitcast_v12i16_to_v3f64_scalar(<12 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
-; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -7287,7 +7030,7 @@ define inreg <12 x half> @bitcast_v3f64_to_v12f16_scalar(<3 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB53_3
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s14, s21, 16
; SI-NEXT: s_lshr_b32 s13, s19, 16
@@ -7295,7 +7038,7 @@ define inreg <12 x half> @bitcast_v3f64_to_v12f16_scalar(<3 x double> inreg %a,
; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB53_4
+; SI-NEXT: s_cbranch_execnz .LBB53_3
; SI-NEXT: .LBB53_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
@@ -7306,16 +7049,8 @@ define inreg <12 x half> @bitcast_v3f64_to_v12f16_scalar(<3 x double> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v1
-; SI-NEXT: s_branch .LBB53_5
+; SI-NEXT: s_branch .LBB53_4
; SI-NEXT: .LBB53_3:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: s_branch .LBB53_2
-; SI-NEXT: .LBB53_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v4, s20
@@ -7328,7 +7063,7 @@ define inreg <12 x half> @bitcast_v3f64_to_v12f16_scalar(<3 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v8, s8
; SI-NEXT: v_mov_b32_e32 v7, s6
; SI-NEXT: v_mov_b32_e32 v6, s4
-; SI-NEXT: .LBB53_5: ; %end
+; SI-NEXT: .LBB53_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -7353,17 +7088,15 @@ define inreg <12 x half> @bitcast_v3f64_to_v12f16_scalar(<3 x double> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB53_3
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB53_4
+; VI-NEXT: s_cbranch_execnz .LBB53_3
; VI-NEXT: .LBB53_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB53_3:
-; VI-NEXT: s_branch .LBB53_2
-; VI-NEXT: .LBB53_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -7378,17 +7111,15 @@ define inreg <12 x half> @bitcast_v3f64_to_v12f16_scalar(<3 x double> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB53_4
+; GFX9-NEXT: s_cbranch_execnz .LBB53_3
; GFX9-NEXT: .LBB53_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB53_3:
-; GFX9-NEXT: s_branch .LBB53_2
-; GFX9-NEXT: .LBB53_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -7406,18 +7137,16 @@ define inreg <12 x half> @bitcast_v3f64_to_v12f16_scalar(<3 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-NEXT: .LBB53_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB53_3:
-; GFX11-NEXT: s_branch .LBB53_2
-; GFX11-NEXT: .LBB53_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -7649,7 +7378,7 @@ define inreg <3 x double> @bitcast_v12f16_to_v3f64_scalar(<12 x half> inreg %a,
; SI-NEXT: s_lshr_b32 s23, s17, 16
; SI-NEXT: s_lshr_b32 s24, s16, 16
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s24, 16
@@ -7669,7 +7398,7 @@ define inreg <3 x double> @bitcast_v12f16_to_v3f64_scalar(<12 x half> inreg %a,
; SI-NEXT: s_and_b32 s9, s21, 0xffff
; SI-NEXT: s_lshl_b32 s10, s12, 16
; SI-NEXT: s_or_b32 s9, s9, s10
-; SI-NEXT: s_cbranch_execnz .LBB55_4
+; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s24
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -7721,9 +7450,6 @@ define inreg <3 x double> @bitcast_v12f16_to_v3f64_scalar(<12 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v5, v7, v5
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB55_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9
-; SI-NEXT: s_branch .LBB55_2
-; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -7736,9 +7462,9 @@ define inreg <3 x double> @bitcast_v12f16_to_v3f64_scalar(<12 x half> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_4
+; VI-NEXT: s_cbranch_execnz .LBB55_3
; VI-NEXT: .LBB55_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s21, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -7773,8 +7499,6 @@ define inreg <3 x double> @bitcast_v12f16_to_v3f64_scalar(<12 x half> inreg %a,
; VI-NEXT: v_or_b32_e32 v0, v0, v6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB55_3:
-; VI-NEXT: s_branch .LBB55_2
-; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -7787,9 +7511,9 @@ define inreg <3 x double> @bitcast_v12f16_to_v3f64_scalar(<12 x half> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
+; GFX9-NEXT: s_cbranch_execnz .LBB55_3
; GFX9-NEXT: .LBB55_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v5, s21, v0 op_sel_hi:[1,0]
@@ -7800,8 +7524,6 @@ define inreg <3 x double> @bitcast_v12f16_to_v3f64_scalar(<12 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
-; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -7817,10 +7539,10 @@ define inreg <3 x double> @bitcast_v12f16_to_v3f64_scalar(<12 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-NEXT: .LBB55_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
@@ -7830,8 +7552,6 @@ define inreg <3 x double> @bitcast_v12f16_to_v3f64_scalar(<12 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
-; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -8063,7 +7783,7 @@ define inreg <12 x half> @bitcast_v12i16_to_v12f16_scalar(<12 x i16> inreg %a, i
; SI-NEXT: s_lshr_b32 s24, s17, 16
; SI-NEXT: s_lshr_b32 s27, s16, 16
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB57_4
+; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s5, s17, 0xffff
; SI-NEXT: s_lshl_b32 s7, s24, 16
@@ -8153,20 +7873,12 @@ define inreg <12 x half> @bitcast_v12i16_to_v12f16_scalar(<12 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v4, s4
; SI-NEXT: v_mov_b32_e32 v5, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB57_4:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: s_branch .LBB57_2
;
; VI-LABEL: bitcast_v12i16_to_v12f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB57_3
; VI-NEXT: .LBB57_2: ; %cmp.true
@@ -8208,16 +7920,14 @@ define inreg <12 x half> @bitcast_v12i16_to_v12f16_scalar(<12 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v12i16_to_v12f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
+; GFX9-NEXT: s_cbranch_execnz .LBB57_3
; GFX9-NEXT: .LBB57_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
@@ -8227,8 +7937,6 @@ define inreg <12 x half> @bitcast_v12i16_to_v12f16_scalar(<12 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
-; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -8246,10 +7954,10 @@ define inreg <12 x half> @bitcast_v12i16_to_v12f16_scalar(<12 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-NEXT: .LBB57_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
@@ -8259,8 +7967,6 @@ define inreg <12 x half> @bitcast_v12i16_to_v12f16_scalar(<12 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB57_3:
-; GFX11-NEXT: s_branch .LBB57_2
-; GFX11-NEXT: .LBB57_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -8469,9 +8175,9 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s6, s17, 16
; SI-NEXT: s_lshr_b32 s11, s16, 16
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB59_3
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_4
+; SI-NEXT: s_cbranch_execnz .LBB59_3
; SI-NEXT: .LBB59_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s11
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -8524,10 +8230,8 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i
; SI-NEXT: v_lshr_b64 v[8:9], v[2:3], 16
; SI-NEXT: v_lshr_b64 v[6:7], v[4:5], 16
; SI-NEXT: v_or_b32_e32 v4, v17, v4
-; SI-NEXT: s_branch .LBB59_5
+; SI-NEXT: s_branch .LBB59_4
; SI-NEXT: .LBB59_3:
-; SI-NEXT: s_branch .LBB59_2
-; SI-NEXT: .LBB59_4:
; SI-NEXT: v_mov_b32_e32 v13, s8
; SI-NEXT: v_mov_b32_e32 v15, s7
; SI-NEXT: v_mov_b32_e32 v16, s6
@@ -8540,7 +8244,7 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v10, s11
; SI-NEXT: v_mov_b32_e32 v8, s10
; SI-NEXT: v_mov_b32_e32 v6, s9
-; SI-NEXT: .LBB59_5: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v12
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v10
; SI-NEXT: v_or_b32_e32 v0, v0, v2
@@ -8565,9 +8269,9 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
+; VI-NEXT: s_cbranch_execnz .LBB59_3
; VI-NEXT: .LBB59_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -8602,8 +8306,6 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v6, v7
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
-; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -8618,9 +8320,9 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
+; GFX9-NEXT: s_cbranch_execnz .LBB59_3
; GFX9-NEXT: .LBB59_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v5, s21, v0 op_sel_hi:[1,0]
@@ -8631,8 +8333,6 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
-; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -8650,10 +8350,10 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-NEXT: .LBB59_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
@@ -8663,8 +8363,6 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
-; GFX11-NEXT: .LBB59_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.224bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.224bit.ll
index e0fac42ac9d77..70d301a4604b4 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.224bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.224bit.ll
@@ -106,7 +106,7 @@ define inreg <7 x float> @bitcast_v7i32_to_v7f32_scalar(<7 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s23, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -126,14 +126,12 @@ define inreg <7 x float> @bitcast_v7i32_to_v7f32_scalar(<7 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v5, s21
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v7i32_to_v7f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -153,14 +151,12 @@ define inreg <7 x float> @bitcast_v7i32_to_v7f32_scalar(<7 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v7i32_to_v7f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -180,15 +176,13 @@ define inreg <7 x float> @bitcast_v7i32_to_v7f32_scalar(<7 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v7i32_to_v7f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -207,8 +201,6 @@ define inreg <7 x float> @bitcast_v7i32_to_v7f32_scalar(<7 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_mov_b32_e32 v6, s18
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -322,9 +314,9 @@ define inreg <7 x i32> @bitcast_v7f32_to_v7i32_scalar(<7 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s23, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v6, s22, 1.0
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -335,8 +327,6 @@ define inreg <7 x i32> @bitcast_v7f32_to_v7i32_scalar(<7 x float> inreg %a, i32
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -350,9 +340,9 @@ define inreg <7 x i32> @bitcast_v7f32_to_v7i32_scalar(<7 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -363,8 +353,6 @@ define inreg <7 x i32> @bitcast_v7f32_to_v7i32_scalar(<7 x float> inreg %a, i32
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -378,9 +366,9 @@ define inreg <7 x i32> @bitcast_v7f32_to_v7i32_scalar(<7 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -391,8 +379,6 @@ define inreg <7 x i32> @bitcast_v7f32_to_v7i32_scalar(<7 x float> inreg %a, i32
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -410,10 +396,10 @@ define inreg <7 x i32> @bitcast_v7f32_to_v7i32_scalar(<7 x float> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
; GFX11-NEXT: s_mov_b32 s7, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v6, s6, 1.0
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
@@ -424,8 +410,6 @@ define inreg <7 x i32> @bitcast_v7f32_to_v7i32_scalar(<7 x float> inreg %a, i32
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -593,7 +577,7 @@ define inreg <14 x i16> @bitcast_v7i32_to_v14i16_scalar(<7 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s23, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s23, s17, 16
; SI-NEXT: s_lshr_b32 s14, s21, 16
@@ -648,21 +632,12 @@ define inreg <14 x i16> @bitcast_v7i32_to_v14i16_scalar(<7 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v5, s10
; SI-NEXT: v_mov_b32_e32 v6, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr23
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v7i32_to_v14i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -682,14 +657,12 @@ define inreg <14 x i16> @bitcast_v7i32_to_v14i16_scalar(<7 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v7i32_to_v14i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -709,15 +682,13 @@ define inreg <14 x i16> @bitcast_v7i32_to_v14i16_scalar(<7 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v7i32_to_v14i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -736,8 +707,6 @@ define inreg <14 x i16> @bitcast_v7i32_to_v14i16_scalar(<7 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_mov_b32_e32 v6, s18
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -958,7 +927,7 @@ define inreg <7 x i32> @bitcast_v14i16_to_v7i32_scalar(<14 x i16> inreg %a, i32
; SI-NEXT: s_lshr_b32 s26, s17, 16
; SI-NEXT: s_lshr_b32 s27, s16, 16
; SI-NEXT: s_cmp_lg_u32 s23, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s27, 16
@@ -1027,15 +996,12 @@ define inreg <7 x i32> @bitcast_v14i16_to_v7i32_scalar(<14 x i16> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v5, s9
; SI-NEXT: v_mov_b32_e32 v6, s10
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v14i16_to_v7i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -1083,16 +1049,14 @@ define inreg <7 x i32> @bitcast_v14i16_to_v7i32_scalar(<14 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v14i16_to_v7i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_4
+; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
@@ -1103,8 +1067,6 @@ define inreg <7 x i32> @bitcast_v14i16_to_v7i32_scalar(<14 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB7_3:
-; GFX9-NEXT: s_branch .LBB7_2
-; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1122,10 +1084,10 @@ define inreg <7 x i32> @bitcast_v14i16_to_v7i32_scalar(<14 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
; GFX11-NEXT: s_mov_b32 s7, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
-; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
; GFX11-NEXT: .LBB7_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
@@ -1136,8 +1098,6 @@ define inreg <7 x i32> @bitcast_v14i16_to_v7i32_scalar(<14 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: s_branch .LBB7_2
-; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -1305,7 +1265,7 @@ define inreg <14 x half> @bitcast_v7i32_to_v14f16_scalar(<7 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s23, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s23, s17, 16
; SI-NEXT: s_lshr_b32 s14, s21, 16
@@ -1360,21 +1320,12 @@ define inreg <14 x half> @bitcast_v7i32_to_v14f16_scalar(<7 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v5, s10
; SI-NEXT: v_mov_b32_e32 v6, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr23
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v7i32_to_v14f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -1394,14 +1345,12 @@ define inreg <14 x half> @bitcast_v7i32_to_v14f16_scalar(<7 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v7i32_to_v14f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -1421,15 +1370,13 @@ define inreg <14 x half> @bitcast_v7i32_to_v14f16_scalar(<7 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v7i32_to_v14f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -1448,8 +1395,6 @@ define inreg <14 x half> @bitcast_v7i32_to_v14f16_scalar(<7 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_mov_b32_e32 v6, s18
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1698,7 +1643,7 @@ define inreg <7 x i32> @bitcast_v14f16_to_v7i32_scalar(<14 x half> inreg %a, i32
; SI-NEXT: s_lshr_b32 s26, s17, 16
; SI-NEXT: s_lshr_b32 s27, s16, 16
; SI-NEXT: s_cmp_lg_u32 s23, 0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s27, 16
@@ -1721,7 +1666,7 @@ define inreg <7 x i32> @bitcast_v14f16_to_v7i32_scalar(<14 x half> inreg %a, i32
; SI-NEXT: s_and_b32 s10, s22, 0xffff
; SI-NEXT: s_lshl_b32 s12, s11, 16
; SI-NEXT: s_or_b32 s10, s10, s12
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s27
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -1781,9 +1726,6 @@ define inreg <7 x i32> @bitcast_v14f16_to_v7i32_scalar(<14 x half> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v6, v8, v6
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -1797,9 +1739,9 @@ define inreg <7 x i32> @bitcast_v14f16_to_v7i32_scalar(<14 x half> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s22, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -1839,8 +1781,6 @@ define inreg <7 x i32> @bitcast_v14f16_to_v7i32_scalar(<14 x half> inreg %a, i32
; VI-NEXT: v_or_b32_e32 v0, v0, v7
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1854,9 +1794,9 @@ define inreg <7 x i32> @bitcast_v14f16_to_v7i32_scalar(<14 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v6, s22, v0 op_sel_hi:[1,0]
@@ -1868,8 +1808,6 @@ define inreg <7 x i32> @bitcast_v14f16_to_v7i32_scalar(<14 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1887,10 +1825,10 @@ define inreg <7 x i32> @bitcast_v14f16_to_v7i32_scalar(<14 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
; GFX11-NEXT: s_mov_b32 s7, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
@@ -1901,8 +1839,6 @@ define inreg <7 x i32> @bitcast_v14f16_to_v7i32_scalar(<14 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -2066,7 +2002,7 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s23, 0
-; SI-NEXT: s_cbranch_scc0 .LBB13_3
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s23, s21, 16
; SI-NEXT: s_lshr_b32 s15, s19, 16
@@ -2075,7 +2011,7 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3
; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB13_4
+; SI-NEXT: s_cbranch_execnz .LBB13_3
; SI-NEXT: .LBB13_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
; SI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -2091,17 +2027,8 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3
; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v1
-; SI-NEXT: s_branch .LBB13_5
+; SI-NEXT: s_branch .LBB13_4
; SI-NEXT: .LBB13_3:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr23
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB13_2
-; SI-NEXT: .LBB13_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -2116,7 +2043,7 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v11, s10
; SI-NEXT: v_mov_b32_e32 v10, s8
; SI-NEXT: v_mov_b32_e32 v7, s4
-; SI-NEXT: .LBB13_5: ; %end
+; SI-NEXT: .LBB13_4: ; %end
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4
@@ -2144,9 +2071,9 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_3
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB13_4
+; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -2157,8 +2084,6 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB13_3:
-; VI-NEXT: s_branch .LBB13_2
-; VI-NEXT: .LBB13_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2173,9 +2098,9 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB13_4
+; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -2186,8 +2111,6 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB13_3:
-; GFX9-NEXT: s_branch .LBB13_2
-; GFX9-NEXT: .LBB13_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2206,10 +2129,10 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
; GFX11-NEXT: .LBB13_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v6, s6, 1.0
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
@@ -2220,8 +2143,6 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB13_3:
-; GFX11-NEXT: s_branch .LBB13_2
-; GFX11-NEXT: .LBB13_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -2447,7 +2368,7 @@ define inreg <7 x float> @bitcast_v14i16_to_v7f32_scalar(<14 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s26, s17, 16
; SI-NEXT: s_lshr_b32 s27, s16, 16
; SI-NEXT: s_cmp_lg_u32 s23, 0
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s27, 16
@@ -2516,15 +2437,12 @@ define inreg <7 x float> @bitcast_v14i16_to_v7f32_scalar(<14 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v5, s9
; SI-NEXT: v_mov_b32_e32 v6, s10
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v14i16_to_v7f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB15_3
; VI-NEXT: .LBB15_2: ; %cmp.true
@@ -2572,16 +2490,14 @@ define inreg <7 x float> @bitcast_v14i16_to_v7f32_scalar(<14 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v14i16_to_v7f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
@@ -2592,8 +2508,6 @@ define inreg <7 x float> @bitcast_v14i16_to_v7f32_scalar(<14 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2611,10 +2525,10 @@ define inreg <7 x float> @bitcast_v14i16_to_v7f32_scalar(<14 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
; GFX11-NEXT: s_mov_b32 s7, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
@@ -2625,8 +2539,6 @@ define inreg <7 x float> @bitcast_v14i16_to_v7f32_scalar(<14 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -2790,7 +2702,7 @@ define inreg <14 x half> @bitcast_v7f32_to_v14f16_scalar(<7 x float> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s23, 0
-; SI-NEXT: s_cbranch_scc0 .LBB17_3
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s23, s21, 16
; SI-NEXT: s_lshr_b32 s15, s19, 16
@@ -2799,7 +2711,7 @@ define inreg <14 x half> @bitcast_v7f32_to_v14f16_scalar(<7 x float> inreg %a, i
; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB17_4
+; SI-NEXT: s_cbranch_execnz .LBB17_3
; SI-NEXT: .LBB17_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
; SI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -2815,17 +2727,8 @@ define inreg <14 x half> @bitcast_v7f32_to_v14f16_scalar(<7 x float> inreg %a, i
; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v1
-; SI-NEXT: s_branch .LBB17_5
+; SI-NEXT: s_branch .LBB17_4
; SI-NEXT: .LBB17_3:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr23
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB17_2
-; SI-NEXT: .LBB17_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -2840,7 +2743,7 @@ define inreg <14 x half> @bitcast_v7f32_to_v14f16_scalar(<7 x float> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v11, s10
; SI-NEXT: v_mov_b32_e32 v10, s8
; SI-NEXT: v_mov_b32_e32 v7, s4
-; SI-NEXT: .LBB17_5: ; %end
+; SI-NEXT: .LBB17_4: ; %end
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4
@@ -2868,9 +2771,9 @@ define inreg <14 x half> @bitcast_v7f32_to_v14f16_scalar(<7 x float> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_3
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_4
+; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -2881,8 +2784,6 @@ define inreg <14 x half> @bitcast_v7f32_to_v14f16_scalar(<7 x float> inreg %a, i
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB17_3:
-; VI-NEXT: s_branch .LBB17_2
-; VI-NEXT: .LBB17_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2897,9 +2798,9 @@ define inreg <14 x half> @bitcast_v7f32_to_v14f16_scalar(<7 x float> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_4
+; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -2910,8 +2811,6 @@ define inreg <14 x half> @bitcast_v7f32_to_v14f16_scalar(<7 x float> inreg %a, i
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB17_3:
-; GFX9-NEXT: s_branch .LBB17_2
-; GFX9-NEXT: .LBB17_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2930,10 +2829,10 @@ define inreg <14 x half> @bitcast_v7f32_to_v14f16_scalar(<7 x float> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
; GFX11-NEXT: .LBB17_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v6, s6, 1.0
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
@@ -2944,8 +2843,6 @@ define inreg <14 x half> @bitcast_v7f32_to_v14f16_scalar(<7 x float> inreg %a, i
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB17_3:
-; GFX11-NEXT: s_branch .LBB17_2
-; GFX11-NEXT: .LBB17_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -3199,7 +3096,7 @@ define inreg <7 x float> @bitcast_v14f16_to_v7f32_scalar(<14 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s26, s17, 16
; SI-NEXT: s_lshr_b32 s27, s16, 16
; SI-NEXT: s_cmp_lg_u32 s23, 0
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s27, 16
@@ -3222,7 +3119,7 @@ define inreg <7 x float> @bitcast_v14f16_to_v7f32_scalar(<14 x half> inreg %a, i
; SI-NEXT: s_and_b32 s10, s22, 0xffff
; SI-NEXT: s_lshl_b32 s12, s11, 16
; SI-NEXT: s_or_b32 s10, s10, s12
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s27
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -3282,9 +3179,6 @@ define inreg <7 x float> @bitcast_v14f16_to_v7f32_scalar(<14 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v6, v8, v6
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -3298,9 +3192,9 @@ define inreg <7 x float> @bitcast_v14f16_to_v7f32_scalar(<14 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
+; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s22, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -3340,8 +3234,6 @@ define inreg <7 x float> @bitcast_v14f16_to_v7f32_scalar(<14 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v0, v7
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
-; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -3355,9 +3247,9 @@ define inreg <7 x float> @bitcast_v14f16_to_v7f32_scalar(<14 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v6, s22, v0 op_sel_hi:[1,0]
@@ -3369,8 +3261,6 @@ define inreg <7 x float> @bitcast_v14f16_to_v7f32_scalar(<14 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3388,10 +3278,10 @@ define inreg <7 x float> @bitcast_v14f16_to_v7f32_scalar(<14 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
; GFX11-NEXT: s_mov_b32 s7, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
@@ -3402,8 +3292,6 @@ define inreg <7 x float> @bitcast_v14f16_to_v7f32_scalar(<14 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -3658,7 +3546,7 @@ define inreg <14 x half> @bitcast_v14i16_to_v14f16_scalar(<14 x i16> inreg %a, i
; SI-NEXT: s_lshr_b32 s27, s17, 16
; SI-NEXT: s_lshr_b32 s40, s16, 16
; SI-NEXT: s_cmp_lg_u32 s23, 0
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s5, s22, 0xffff
; SI-NEXT: s_lshl_b32 s7, s26, 16
@@ -3761,21 +3649,12 @@ define inreg <14 x half> @bitcast_v14i16_to_v14f16_scalar(<14 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v5, s5
; SI-NEXT: v_mov_b32_e32 v6, s10
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr23
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_v14i16_to_v14f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
@@ -3823,16 +3702,14 @@ define inreg <14 x half> @bitcast_v14i16_to_v14f16_scalar(<14 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v14i16_to_v14f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
+; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
@@ -3843,8 +3720,6 @@ define inreg <14 x half> @bitcast_v14i16_to_v14f16_scalar(<14 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
-; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3863,10 +3738,10 @@ define inreg <14 x half> @bitcast_v14i16_to_v14f16_scalar(<14 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
; GFX11-NEXT: .LBB21_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
@@ -3877,8 +3752,6 @@ define inreg <14 x half> @bitcast_v14i16_to_v14f16_scalar(<14 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
-; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -4106,9 +3979,9 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s6, s17, 16
; SI-NEXT: s_lshr_b32 s12, s16, 16
; SI-NEXT: s_cmp_lg_u32 s23, 0
-; SI-NEXT: s_cbranch_scc0 .LBB23_3
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_4
+; SI-NEXT: s_cbranch_execnz .LBB23_3
; SI-NEXT: .LBB23_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s12
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -4169,10 +4042,8 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i
; SI-NEXT: v_lshr_b64 v[8:9], v[2:3], 16
; SI-NEXT: v_lshr_b64 v[6:7], v[4:5], 16
; SI-NEXT: v_or_b32_e32 v4, v19, v4
-; SI-NEXT: s_branch .LBB23_5
+; SI-NEXT: s_branch .LBB23_4
; SI-NEXT: .LBB23_3:
-; SI-NEXT: s_branch .LBB23_2
-; SI-NEXT: .LBB23_4:
; SI-NEXT: v_mov_b32_e32 v12, s10
; SI-NEXT: v_mov_b32_e32 v15, s8
; SI-NEXT: v_mov_b32_e32 v17, s7
@@ -4187,7 +4058,7 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v10, s12
; SI-NEXT: v_mov_b32_e32 v8, s11
; SI-NEXT: v_mov_b32_e32 v6, s9
-; SI-NEXT: .LBB23_5: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v13
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v10
; SI-NEXT: v_or_b32_e32 v0, v0, v2
@@ -4215,9 +4086,9 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
+; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -4257,8 +4128,6 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v7, v8
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
-; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -4273,9 +4142,9 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
+; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v6, s22, v0 op_sel_hi:[1,0]
@@ -4287,8 +4156,6 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
-; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -4307,10 +4174,10 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
; GFX11-NEXT: .LBB23_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
@@ -4321,8 +4188,6 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
-; GFX11-NEXT: .LBB23_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
index d917cb0d623bc..0dee25b4d6d99 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
@@ -110,7 +110,7 @@ define inreg <8 x float> @bitcast_v8i32_to_v8f32_scalar(<8 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -132,14 +132,12 @@ define inreg <8 x float> @bitcast_v8i32_to_v8f32_scalar(<8 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v8i32_to_v8f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -161,14 +159,12 @@ define inreg <8 x float> @bitcast_v8i32_to_v8f32_scalar(<8 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v8i32_to_v8f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -190,15 +186,13 @@ define inreg <8 x float> @bitcast_v8i32_to_v8f32_scalar(<8 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v8i32_to_v8f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -218,8 +212,6 @@ define inreg <8 x float> @bitcast_v8i32_to_v8f32_scalar(<8 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -336,9 +328,9 @@ define inreg <8 x i32> @bitcast_v8f32_to_v8i32_scalar(<8 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v7, s23, 1.0
; SI-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -350,8 +342,6 @@ define inreg <8 x i32> @bitcast_v8f32_to_v8i32_scalar(<8 x float> inreg %a, i32
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -366,9 +356,9 @@ define inreg <8 x i32> @bitcast_v8f32_to_v8i32_scalar(<8 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -380,8 +370,6 @@ define inreg <8 x i32> @bitcast_v8f32_to_v8i32_scalar(<8 x float> inreg %a, i32
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -396,9 +384,9 @@ define inreg <8 x i32> @bitcast_v8f32_to_v8i32_scalar(<8 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -410,8 +398,6 @@ define inreg <8 x i32> @bitcast_v8f32_to_v8i32_scalar(<8 x float> inreg %a, i32
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -431,10 +417,10 @@ define inreg <8 x i32> @bitcast_v8f32_to_v8i32_scalar(<8 x float> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v7, s7, 1.0
; GFX11-NEXT: v_add_f32_e64 v6, s6, 1.0
@@ -446,8 +432,6 @@ define inreg <8 x i32> @bitcast_v8f32_to_v8i32_scalar(<8 x float> inreg %a, i32
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -574,7 +558,7 @@ define inreg <4 x i64> @bitcast_v8i32_to_v4i64_scalar(<8 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB5_3
; SI-NEXT: .LBB5_2: ; %cmp.true
@@ -596,14 +580,12 @@ define inreg <4 x i64> @bitcast_v8i32_to_v4i64_scalar(<8 x i32> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v8i32_to_v4i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -625,14 +607,12 @@ define inreg <4 x i64> @bitcast_v8i32_to_v4i64_scalar(<8 x i32> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v8i32_to_v4i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -654,15 +634,13 @@ define inreg <4 x i64> @bitcast_v8i32_to_v4i64_scalar(<8 x i32> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v8i32_to_v4i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -682,8 +660,6 @@ define inreg <4 x i64> @bitcast_v8i32_to_v4i64_scalar(<8 x i32> inreg %a, i32 in
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -807,7 +783,7 @@ define inreg <8 x i32> @bitcast_v4i64_to_v8i32_scalar(<4 x i64> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB7_3
; SI-NEXT: .LBB7_2: ; %cmp.true
@@ -829,14 +805,12 @@ define inreg <8 x i32> @bitcast_v4i64_to_v8i32_scalar(<4 x i64> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v4i64_to_v8i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -858,14 +832,12 @@ define inreg <8 x i32> @bitcast_v4i64_to_v8i32_scalar(<4 x i64> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v4i64_to_v8i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
@@ -887,15 +859,13 @@ define inreg <8 x i32> @bitcast_v4i64_to_v8i32_scalar(<4 x i64> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v4i64_to_v8i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
@@ -915,8 +885,6 @@ define inreg <8 x i32> @bitcast_v4i64_to_v8i32_scalar(<4 x i64> inreg %a, i32 in
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1038,7 +1006,7 @@ define inreg <4 x double> @bitcast_v8i32_to_v4f64_scalar(<8 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB9_3
; SI-NEXT: .LBB9_2: ; %cmp.true
@@ -1060,14 +1028,12 @@ define inreg <4 x double> @bitcast_v8i32_to_v4f64_scalar(<8 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v8i32_to_v4f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -1089,14 +1055,12 @@ define inreg <4 x double> @bitcast_v8i32_to_v4f64_scalar(<8 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v8i32_to_v4f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -1118,15 +1082,13 @@ define inreg <4 x double> @bitcast_v8i32_to_v4f64_scalar(<8 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v8i32_to_v4f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -1146,8 +1108,6 @@ define inreg <4 x double> @bitcast_v8i32_to_v4f64_scalar(<8 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1255,9 +1215,9 @@ define inreg <8 x i32> @bitcast_v4f64_to_v8i32_scalar(<4 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -1265,8 +1225,6 @@ define inreg <8 x i32> @bitcast_v4f64_to_v8i32_scalar(<4 x double> inreg %a, i32
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -1281,9 +1239,9 @@ define inreg <8 x i32> @bitcast_v4f64_to_v8i32_scalar(<4 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -1291,8 +1249,6 @@ define inreg <8 x i32> @bitcast_v4f64_to_v8i32_scalar(<4 x double> inreg %a, i32
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1307,9 +1263,9 @@ define inreg <8 x i32> @bitcast_v4f64_to_v8i32_scalar(<4 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -1317,8 +1273,6 @@ define inreg <8 x i32> @bitcast_v4f64_to_v8i32_scalar(<4 x double> inreg %a, i32
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1338,10 +1292,10 @@ define inreg <8 x i32> @bitcast_v4f64_to_v8i32_scalar(<4 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[6:7], s[6:7], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
@@ -1349,8 +1303,6 @@ define inreg <8 x i32> @bitcast_v4f64_to_v8i32_scalar(<4 x double> inreg %a, i32
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -1528,7 +1480,7 @@ define inreg <16 x i16> @bitcast_v8i32_to_v16i16_scalar(<8 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB13_4
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s14, s23, 16
; SI-NEXT: s_lshr_b32 s15, s21, 16
@@ -1590,22 +1542,12 @@ define inreg <16 x i16> @bitcast_v8i32_to_v16i16_scalar(<8 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s4
; SI-NEXT: v_mov_b32_e32 v7, s11
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB13_4:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: s_branch .LBB13_2
;
; VI-LABEL: bitcast_v8i32_to_v16i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
@@ -1627,14 +1569,12 @@ define inreg <16 x i16> @bitcast_v8i32_to_v16i16_scalar(<8 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v8i32_to_v16i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
@@ -1656,15 +1596,13 @@ define inreg <16 x i16> @bitcast_v8i32_to_v16i16_scalar(<8 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v8i32_to_v16i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
@@ -1684,8 +1622,6 @@ define inreg <16 x i16> @bitcast_v8i32_to_v16i16_scalar(<8 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1923,7 +1859,7 @@ define inreg <8 x i32> @bitcast_v16i16_to_v8i32_scalar(<16 x i16> inreg %a, i32
; SI-NEXT: s_lshr_b32 s29, s17, 16
; SI-NEXT: s_lshr_b32 s40, s16, 16
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s40, 16
@@ -2001,15 +1937,12 @@ define inreg <8 x i32> @bitcast_v16i16_to_v8i32_scalar(<16 x i16> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s10
; SI-NEXT: v_mov_b32_e32 v7, s11
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v16i16_to_v8i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB15_3
; VI-NEXT: .LBB15_2: ; %cmp.true
@@ -2063,16 +1996,14 @@ define inreg <8 x i32> @bitcast_v16i16_to_v8i32_scalar(<16 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v16i16_to_v8i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
@@ -2084,8 +2015,6 @@ define inreg <8 x i32> @bitcast_v16i16_to_v8i32_scalar(<16 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2105,10 +2034,10 @@ define inreg <8 x i32> @bitcast_v16i16_to_v8i32_scalar(<16 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
@@ -2120,8 +2049,6 @@ define inreg <8 x i32> @bitcast_v16i16_to_v8i32_scalar(<16 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -2299,7 +2226,7 @@ define inreg <16 x half> @bitcast_v8i32_to_v16f16_scalar(<8 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s14, s23, 16
; SI-NEXT: s_lshr_b32 s15, s21, 16
@@ -2361,22 +2288,12 @@ define inreg <16 x half> @bitcast_v8i32_to_v16f16_scalar(<8 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s4
; SI-NEXT: v_mov_b32_e32 v7, s11
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_v8i32_to_v16f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
@@ -2398,14 +2315,12 @@ define inreg <16 x half> @bitcast_v8i32_to_v16f16_scalar(<8 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v8i32_to_v16f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
@@ -2427,15 +2342,13 @@ define inreg <16 x half> @bitcast_v8i32_to_v16f16_scalar(<8 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v8i32_to_v16f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
@@ -2455,8 +2368,6 @@ define inreg <16 x half> @bitcast_v8i32_to_v16f16_scalar(<8 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2726,7 +2637,7 @@ define inreg <8 x i32> @bitcast_v16f16_to_v8i32_scalar(<16 x half> inreg %a, i32
; SI-NEXT: s_lshr_b32 s29, s17, 16
; SI-NEXT: s_lshr_b32 s40, s16, 16
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s40, 16
@@ -2752,7 +2663,7 @@ define inreg <8 x i32> @bitcast_v16f16_to_v8i32_scalar(<16 x half> inreg %a, i32
; SI-NEXT: s_and_b32 s11, s23, 0xffff
; SI-NEXT: s_lshl_b32 s12, s14, 16
; SI-NEXT: s_or_b32 s11, s11, s12
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s40
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -2820,9 +2731,6 @@ define inreg <8 x i32> @bitcast_v16f16_to_v8i32_scalar(<16 x half> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v7, v9, v7
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -2837,9 +2745,9 @@ define inreg <8 x i32> @bitcast_v16f16_to_v8i32_scalar(<16 x half> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
+; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s23, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -2884,8 +2792,6 @@ define inreg <8 x i32> @bitcast_v16f16_to_v8i32_scalar(<16 x half> inreg %a, i32
; VI-NEXT: v_or_b32_e32 v0, v0, v8
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
-; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2900,9 +2806,9 @@ define inreg <8 x i32> @bitcast_v16f16_to_v8i32_scalar(<16 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v7, s23, v0 op_sel_hi:[1,0]
@@ -2915,8 +2821,6 @@ define inreg <8 x i32> @bitcast_v16f16_to_v8i32_scalar(<16 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2936,10 +2840,10 @@ define inreg <8 x i32> @bitcast_v16f16_to_v8i32_scalar(<16 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
@@ -2951,8 +2855,6 @@ define inreg <8 x i32> @bitcast_v16f16_to_v8i32_scalar(<16 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -3171,7 +3073,7 @@ define inreg <16 x bfloat> @bitcast_v8i32_to_v16bf16_scalar(<8 x i32> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s7, s23, 0xffff0000
; SI-NEXT: s_lshl_b32 s6, s23, 16
@@ -3249,30 +3151,12 @@ define inreg <16 x bfloat> @bitcast_v8i32_to_v16bf16_scalar(<8 x i32> inreg %a,
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s6
; SI-NEXT: v_lshr_b64 v[7:8], v[7:8], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_v8i32_to_v16bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
@@ -3294,14 +3178,12 @@ define inreg <16 x bfloat> @bitcast_v8i32_to_v16bf16_scalar(<8 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v8i32_to_v16bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
@@ -3323,15 +3205,13 @@ define inreg <16 x bfloat> @bitcast_v8i32_to_v16bf16_scalar(<8 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_v8i32_to_v16bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
@@ -3351,8 +3231,6 @@ define inreg <16 x bfloat> @bitcast_v8i32_to_v16bf16_scalar(<8 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_4:
-; GFX11-NEXT: s_branch .LBB21_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4151,7 +4029,7 @@ define inreg <8 x i32> @bitcast_v16bf16_to_v8i32_scalar(<16 x bfloat> inreg %a,
; SI-NEXT: v_mul_f32_e64 v25, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v11, 1.0, s7
; SI-NEXT: v_mul_f32_e64 v9, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v32
; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v31
@@ -4221,17 +4099,14 @@ define inreg <8 x i32> @bitcast_v16bf16_to_v8i32_scalar(<16 x bfloat> inreg %a,
; SI-NEXT: v_lshr_b64 v[7:8], v[7:8], 16
; SI-NEXT: .LBB23_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v16bf16_to_v8i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
+; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s23, 16
; VI-NEXT: v_mov_b32_e32 v8, 0x40c00000
@@ -4383,8 +4258,6 @@ define inreg <8 x i32> @bitcast_v16bf16_to_v8i32_scalar(<16 x bfloat> inreg %a,
; VI-NEXT: v_mov_b32_e32 v5, v8
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
-; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -4399,9 +4272,9 @@ define inreg <8 x i32> @bitcast_v16bf16_to_v8i32_scalar(<16 x bfloat> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
+; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s23, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -4559,8 +4432,6 @@ define inreg <8 x i32> @bitcast_v16bf16_to_v8i32_scalar(<16 x bfloat> inreg %a,
; GFX9-NEXT: v_lshl_or_b32 v0, v9, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
-; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -4580,10 +4451,10 @@ define inreg <8 x i32> @bitcast_v16bf16_to_v8i32_scalar(<16 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: s_mov_b32 s4, s16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB23_3
; GFX11-TRUE16-NEXT: .LBB23_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s8, s7, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
@@ -4744,8 +4615,6 @@ define inreg <8 x i32> @bitcast_v16bf16_to_v8i32_scalar(<16 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB23_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB23_2
-; GFX11-TRUE16-NEXT: .LBB23_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -4761,10 +4630,10 @@ define inreg <8 x i32> @bitcast_v16bf16_to_v8i32_scalar(<16 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: s_mov_b32 s4, s16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB23_3
; GFX11-FAKE16-NEXT: .LBB23_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s7, 16
; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff0000
@@ -4942,8 +4811,6 @@ define inreg <8 x i32> @bitcast_v16bf16_to_v8i32_scalar(<16 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v12, 16, v9
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB23_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB23_2
-; GFX11-FAKE16-NEXT: .LBB23_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -5516,7 +5383,7 @@ define inreg <32 x i8> @bitcast_v8i32_to_v32i8_scalar(<8 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB25_4
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s56, s23, 24
; SI-NEXT: s_lshr_b32 s57, s23, 16
@@ -5610,38 +5477,12 @@ define inreg <32 x i8> @bitcast_v8i32_to_v32i8_scalar(<8 x i32> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v30, s57
; SI-NEXT: v_mov_b32_e32 v31, s56
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB25_4:
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: s_branch .LBB25_2
;
; VI-LABEL: bitcast_v8i32_to_v32i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB25_4
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s23, 24
; VI-NEXT: s_lshr_b32 s15, s23, 16
@@ -5735,38 +5576,12 @@ define inreg <32 x i8> @bitcast_v8i32_to_v32i8_scalar(<8 x i32> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v30, s15
; VI-NEXT: v_mov_b32_e32 v31, s14
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_4:
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr29
-; VI-NEXT: ; implicit-def: $sgpr28
-; VI-NEXT: ; implicit-def: $sgpr27
-; VI-NEXT: ; implicit-def: $sgpr26
-; VI-NEXT: ; implicit-def: $sgpr25
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr24
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB25_2
;
; GFX9-LABEL: bitcast_v8i32_to_v32i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s14, s23, 24
; GFX9-NEXT: s_lshr_b32 s15, s23, 16
@@ -5860,39 +5675,13 @@ define inreg <32 x i8> @bitcast_v8i32_to_v32i8_scalar(<8 x i32> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v30, s15
; GFX9-NEXT: v_mov_b32_e32 v31, s14
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_4:
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr29
-; GFX9-NEXT: ; implicit-def: $sgpr28
-; GFX9-NEXT: ; implicit-def: $sgpr27
-; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: ; implicit-def: $sgpr25
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr24
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB25_2
;
; GFX11-LABEL: bitcast_v8i32_to_v32i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s46, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s12, s19, 24
; GFX11-NEXT: s_lshr_b32 s13, s19, 16
@@ -5972,32 +5761,6 @@ define inreg <32 x i8> @bitcast_v8i32_to_v32i8_scalar(<8 x i32> inreg %a, i32 in
; GFX11-NEXT: v_dual_mov_b32 v28, s19 :: v_dual_mov_b32 v29, s14
; GFX11-NEXT: v_dual_mov_b32 v30, s13 :: v_dual_mov_b32 v31, s12
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB25_4:
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr29
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr27
-; GFX11-NEXT: ; implicit-def: $sgpr26
-; GFX11-NEXT: ; implicit-def: $sgpr25
-; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr23
-; GFX11-NEXT: ; implicit-def: $sgpr22
-; GFX11-NEXT: ; implicit-def: $sgpr21
-; GFX11-NEXT: ; implicit-def: $sgpr20
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: s_branch .LBB25_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6983,7 +6746,7 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
; SI-NEXT: v_readfirstlane_b32 s62, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s63, v0
-; SI-NEXT: s_cbranch_scc0 .LBB27_4
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -7173,9 +6936,6 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v6, s10
; SI-NEXT: v_mov_b32_e32 v7, s11
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11
-; SI-NEXT: s_branch .LBB27_2
;
; VI-LABEL: bitcast_v32i8_to_v8i32_scalar:
; VI: ; %bb.0:
@@ -7200,7 +6960,7 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v7, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -7336,9 +7096,6 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v7
; VI-NEXT: .LBB27_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v32i8_to_v8i32_scalar:
; GFX9: ; %bb.0:
@@ -7363,7 +7120,7 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
; GFX9-NEXT: v_readfirstlane_b32 s46, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s47, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v7, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -7492,9 +7249,6 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
; GFX9-NEXT: v_or_b32_sdwa v7, v8, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB27_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX9-NEXT: s_branch .LBB27_2
;
; GFX11-LABEL: bitcast_v32i8_to_v8i32_scalar:
; GFX11: ; %bb.0:
@@ -7516,7 +7270,7 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
; GFX11-NEXT: v_readfirstlane_b32 s41, v0
; GFX11-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-NEXT: s_mov_b32 s42, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v3, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -7630,9 +7384,6 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
; GFX11-NEXT: v_or_b32_e32 v7, v12, v13
; GFX11-NEXT: .LBB27_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB27_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-NEXT: s_branch .LBB27_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7749,9 +7500,9 @@ define inreg <4 x i64> @bitcast_v8f32_to_v4i64_scalar(<8 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB29_4
+; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v7, s23, 1.0
; SI-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -7763,8 +7514,6 @@ define inreg <4 x i64> @bitcast_v8f32_to_v4i64_scalar(<8 x float> inreg %a, i32
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB29_3:
-; SI-NEXT: s_branch .LBB29_2
-; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -7779,9 +7528,9 @@ define inreg <4 x i64> @bitcast_v8f32_to_v4i64_scalar(<8 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
+; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -7793,8 +7542,6 @@ define inreg <4 x i64> @bitcast_v8f32_to_v4i64_scalar(<8 x float> inreg %a, i32
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
-; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -7809,9 +7556,9 @@ define inreg <4 x i64> @bitcast_v8f32_to_v4i64_scalar(<8 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
+; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -7823,8 +7570,6 @@ define inreg <4 x i64> @bitcast_v8f32_to_v4i64_scalar(<8 x float> inreg %a, i32
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
-; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -7844,10 +7589,10 @@ define inreg <4 x i64> @bitcast_v8f32_to_v4i64_scalar(<8 x float> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-NEXT: .LBB29_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v7, s7, 1.0
; GFX11-NEXT: v_add_f32_e64 v6, s6, 1.0
@@ -7859,8 +7604,6 @@ define inreg <4 x i64> @bitcast_v8f32_to_v4i64_scalar(<8 x float> inreg %a, i32
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
-; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -7989,7 +7732,7 @@ define inreg <8 x float> @bitcast_v4i64_to_v8f32_scalar(<4 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB31_3
; SI-NEXT: .LBB31_2: ; %cmp.true
@@ -8011,14 +7754,12 @@ define inreg <8 x float> @bitcast_v4i64_to_v8f32_scalar(<4 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v4i64_to_v8f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB31_3
; VI-NEXT: .LBB31_2: ; %cmp.true
@@ -8040,14 +7781,12 @@ define inreg <8 x float> @bitcast_v4i64_to_v8f32_scalar(<4 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v4i64_to_v8f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
@@ -8069,15 +7808,13 @@ define inreg <8 x float> @bitcast_v4i64_to_v8f32_scalar(<4 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB31_4:
-; GFX9-NEXT: s_branch .LBB31_2
;
; GFX11-LABEL: bitcast_v4i64_to_v8f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
@@ -8097,8 +7834,6 @@ define inreg <8 x float> @bitcast_v4i64_to_v8f32_scalar(<4 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB31_4:
-; GFX11-NEXT: s_branch .LBB31_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8215,9 +7950,9 @@ define inreg <4 x double> @bitcast_v8f32_to_v4f64_scalar(<8 x float> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB33_4
+; SI-NEXT: s_cbranch_execnz .LBB33_3
; SI-NEXT: .LBB33_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v7, s23, 1.0
; SI-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -8229,8 +7964,6 @@ define inreg <4 x double> @bitcast_v8f32_to_v4f64_scalar(<8 x float> inreg %a, i
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB33_3:
-; SI-NEXT: s_branch .LBB33_2
-; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -8245,9 +7978,9 @@ define inreg <4 x double> @bitcast_v8f32_to_v4f64_scalar(<8 x float> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_4
+; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -8259,8 +7992,6 @@ define inreg <4 x double> @bitcast_v8f32_to_v4f64_scalar(<8 x float> inreg %a, i
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB33_3:
-; VI-NEXT: s_branch .LBB33_2
-; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -8275,9 +8006,9 @@ define inreg <4 x double> @bitcast_v8f32_to_v4f64_scalar(<8 x float> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
+; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -8289,8 +8020,6 @@ define inreg <4 x double> @bitcast_v8f32_to_v4f64_scalar(<8 x float> inreg %a, i
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: s_branch .LBB33_2
-; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -8310,10 +8039,10 @@ define inreg <4 x double> @bitcast_v8f32_to_v4f64_scalar(<8 x float> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v7, s7, 1.0
; GFX11-NEXT: v_add_f32_e64 v6, s6, 1.0
@@ -8325,8 +8054,6 @@ define inreg <4 x double> @bitcast_v8f32_to_v4f64_scalar(<8 x float> inreg %a, i
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
-; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -8439,9 +8166,9 @@ define inreg <8 x float> @bitcast_v4f64_to_v8f32_scalar(<4 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB35_3
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB35_4
+; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -8449,8 +8176,6 @@ define inreg <8 x float> @bitcast_v4f64_to_v8f32_scalar(<4 x double> inreg %a, i
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB35_3:
-; SI-NEXT: s_branch .LBB35_2
-; SI-NEXT: .LBB35_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -8465,9 +8190,9 @@ define inreg <8 x float> @bitcast_v4f64_to_v8f32_scalar(<4 x double> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_4
+; VI-NEXT: s_cbranch_execnz .LBB35_3
; VI-NEXT: .LBB35_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -8475,8 +8200,6 @@ define inreg <8 x float> @bitcast_v4f64_to_v8f32_scalar(<4 x double> inreg %a, i
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB35_3:
-; VI-NEXT: s_branch .LBB35_2
-; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -8491,9 +8214,9 @@ define inreg <8 x float> @bitcast_v4f64_to_v8f32_scalar(<4 x double> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
+; GFX9-NEXT: s_cbranch_execnz .LBB35_3
; GFX9-NEXT: .LBB35_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -8501,8 +8224,6 @@ define inreg <8 x float> @bitcast_v4f64_to_v8f32_scalar(<4 x double> inreg %a, i
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
-; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -8522,10 +8243,10 @@ define inreg <8 x float> @bitcast_v4f64_to_v8f32_scalar(<4 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-NEXT: .LBB35_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[6:7], s[6:7], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
@@ -8533,8 +8254,6 @@ define inreg <8 x float> @bitcast_v4f64_to_v8f32_scalar(<4 x double> inreg %a, i
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
-; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -8707,7 +8426,7 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB37_3
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s25, s23, 16
; SI-NEXT: s_lshr_b32 s24, s21, 16
@@ -8717,7 +8436,7 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3
; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB37_4
+; SI-NEXT: s_cbranch_execnz .LBB37_3
; SI-NEXT: .LBB37_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v7, s23, 1.0
; SI-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -8735,18 +8454,8 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3
; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; SI-NEXT: s_branch .LBB37_5
+; SI-NEXT: s_branch .LBB37_4
; SI-NEXT: .LBB37_3:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: s_branch .LBB37_2
-; SI-NEXT: .LBB37_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -8763,7 +8472,7 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v9, s6
; SI-NEXT: v_mov_b32_e32 v10, s8
; SI-NEXT: v_mov_b32_e32 v11, s10
-; SI-NEXT: .LBB37_5: ; %end
+; SI-NEXT: .LBB37_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -8794,9 +8503,9 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB37_3
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_4
+; VI-NEXT: s_cbranch_execnz .LBB37_3
; VI-NEXT: .LBB37_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -8808,8 +8517,6 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB37_3:
-; VI-NEXT: s_branch .LBB37_2
-; VI-NEXT: .LBB37_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -8824,9 +8531,9 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_4
+; GFX9-NEXT: s_cbranch_execnz .LBB37_3
; GFX9-NEXT: .LBB37_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -8838,8 +8545,6 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB37_3:
-; GFX9-NEXT: s_branch .LBB37_2
-; GFX9-NEXT: .LBB37_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -8859,10 +8564,10 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
; GFX11-NEXT: .LBB37_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v7, s7, 1.0
; GFX11-NEXT: v_add_f32_e64 v6, s6, 1.0
@@ -8874,8 +8579,6 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: s_branch .LBB37_2
-; GFX11-NEXT: .LBB37_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -9118,7 +8821,7 @@ define inreg <8 x float> @bitcast_v16i16_to_v8f32_scalar(<16 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s29, s17, 16
; SI-NEXT: s_lshr_b32 s40, s16, 16
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB39_4
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s40, 16
@@ -9196,15 +8899,12 @@ define inreg <8 x float> @bitcast_v16i16_to_v8f32_scalar(<16 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v6, s10
; SI-NEXT: v_mov_b32_e32 v7, s11
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11
-; SI-NEXT: s_branch .LBB39_2
;
; VI-LABEL: bitcast_v16i16_to_v8f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB39_3
; VI-NEXT: .LBB39_2: ; %cmp.true
@@ -9258,16 +8958,14 @@ define inreg <8 x float> @bitcast_v16i16_to_v8f32_scalar(<16 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v16i16_to_v8f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
+; GFX9-NEXT: s_cbranch_execnz .LBB39_3
; GFX9-NEXT: .LBB39_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
@@ -9279,8 +8977,6 @@ define inreg <8 x float> @bitcast_v16i16_to_v8f32_scalar(<16 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
-; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -9300,10 +8996,10 @@ define inreg <8 x float> @bitcast_v16i16_to_v8f32_scalar(<16 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_3
; GFX11-NEXT: .LBB39_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
@@ -9315,8 +9011,6 @@ define inreg <8 x float> @bitcast_v16i16_to_v8f32_scalar(<16 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
-; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -9489,7 +9183,7 @@ define inreg <16 x half> @bitcast_v8f32_to_v16f16_scalar(<8 x float> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB41_3
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s25, s23, 16
; SI-NEXT: s_lshr_b32 s24, s21, 16
@@ -9499,7 +9193,7 @@ define inreg <16 x half> @bitcast_v8f32_to_v16f16_scalar(<8 x float> inreg %a, i
; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB41_4
+; SI-NEXT: s_cbranch_execnz .LBB41_3
; SI-NEXT: .LBB41_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v7, s23, 1.0
; SI-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -9517,18 +9211,8 @@ define inreg <16 x half> @bitcast_v8f32_to_v16f16_scalar(<8 x float> inreg %a, i
; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; SI-NEXT: s_branch .LBB41_5
+; SI-NEXT: s_branch .LBB41_4
; SI-NEXT: .LBB41_3:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: s_branch .LBB41_2
-; SI-NEXT: .LBB41_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -9545,7 +9229,7 @@ define inreg <16 x half> @bitcast_v8f32_to_v16f16_scalar(<8 x float> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v9, s6
; SI-NEXT: v_mov_b32_e32 v10, s8
; SI-NEXT: v_mov_b32_e32 v11, s10
-; SI-NEXT: .LBB41_5: ; %end
+; SI-NEXT: .LBB41_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -9576,9 +9260,9 @@ define inreg <16 x half> @bitcast_v8f32_to_v16f16_scalar(<8 x float> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB41_3
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB41_4
+; VI-NEXT: s_cbranch_execnz .LBB41_3
; VI-NEXT: .LBB41_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -9590,8 +9274,6 @@ define inreg <16 x half> @bitcast_v8f32_to_v16f16_scalar(<8 x float> inreg %a, i
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB41_3:
-; VI-NEXT: s_branch .LBB41_2
-; VI-NEXT: .LBB41_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -9606,9 +9288,9 @@ define inreg <16 x half> @bitcast_v8f32_to_v16f16_scalar(<8 x float> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB41_4
+; GFX9-NEXT: s_cbranch_execnz .LBB41_3
; GFX9-NEXT: .LBB41_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -9620,8 +9302,6 @@ define inreg <16 x half> @bitcast_v8f32_to_v16f16_scalar(<8 x float> inreg %a, i
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB41_3:
-; GFX9-NEXT: s_branch .LBB41_2
-; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -9641,10 +9321,10 @@ define inreg <16 x half> @bitcast_v8f32_to_v16f16_scalar(<8 x float> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
; GFX11-NEXT: .LBB41_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v7, s7, 1.0
; GFX11-NEXT: v_add_f32_e64 v6, s6, 1.0
@@ -9656,8 +9336,6 @@ define inreg <16 x half> @bitcast_v8f32_to_v16f16_scalar(<8 x float> inreg %a, i
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB41_3:
-; GFX11-NEXT: s_branch .LBB41_2
-; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -9932,7 +9610,7 @@ define inreg <8 x float> @bitcast_v16f16_to_v8f32_scalar(<16 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s29, s17, 16
; SI-NEXT: s_lshr_b32 s40, s16, 16
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB43_3
+; SI-NEXT: s_cbranch_scc0 .LBB43_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s40, 16
@@ -9958,7 +9636,7 @@ define inreg <8 x float> @bitcast_v16f16_to_v8f32_scalar(<16 x half> inreg %a, i
; SI-NEXT: s_and_b32 s11, s23, 0xffff
; SI-NEXT: s_lshl_b32 s12, s14, 16
; SI-NEXT: s_or_b32 s11, s11, s12
-; SI-NEXT: s_cbranch_execnz .LBB43_4
+; SI-NEXT: s_cbranch_execnz .LBB43_3
; SI-NEXT: .LBB43_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s40
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -10026,9 +9704,6 @@ define inreg <8 x float> @bitcast_v16f16_to_v8f32_scalar(<16 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v7, v9, v7
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB43_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11
-; SI-NEXT: s_branch .LBB43_2
-; SI-NEXT: .LBB43_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -10043,9 +9718,9 @@ define inreg <8 x float> @bitcast_v16f16_to_v8f32_scalar(<16 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB43_3
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB43_4
+; VI-NEXT: s_cbranch_execnz .LBB43_3
; VI-NEXT: .LBB43_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s23, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -10090,8 +9765,6 @@ define inreg <8 x float> @bitcast_v16f16_to_v8f32_scalar(<16 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v0, v8
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB43_3:
-; VI-NEXT: s_branch .LBB43_2
-; VI-NEXT: .LBB43_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -10106,9 +9779,9 @@ define inreg <8 x float> @bitcast_v16f16_to_v8f32_scalar(<16 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
+; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v7, s23, v0 op_sel_hi:[1,0]
@@ -10121,8 +9794,6 @@ define inreg <8 x float> @bitcast_v16f16_to_v8f32_scalar(<16 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
-; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -10142,10 +9813,10 @@ define inreg <8 x float> @bitcast_v16f16_to_v8f32_scalar(<16 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-NEXT: .LBB43_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
@@ -10157,8 +9828,6 @@ define inreg <8 x float> @bitcast_v16f16_to_v8f32_scalar(<16 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
-; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -10372,7 +10041,7 @@ define inreg <16 x bfloat> @bitcast_v8f32_to_v16bf16_scalar(<8 x float> inreg %a
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB45_3
+; SI-NEXT: s_cbranch_scc0 .LBB45_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s23, 0xffff0000
; SI-NEXT: s_lshl_b32 s7, s23, 16
@@ -10390,7 +10059,7 @@ define inreg <16 x bfloat> @bitcast_v8f32_to_v16bf16_scalar(<8 x float> inreg %a
; SI-NEXT: s_lshl_b32 s27, s17, 16
; SI-NEXT: s_and_b32 s28, s16, 0xffff0000
; SI-NEXT: s_lshl_b32 s29, s16, 16
-; SI-NEXT: s_cbranch_execnz .LBB45_4
+; SI-NEXT: s_cbranch_execnz .LBB45_3
; SI-NEXT: .LBB45_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
@@ -10416,26 +10085,8 @@ define inreg <16 x bfloat> @bitcast_v8f32_to_v16bf16_scalar(<8 x float> inreg %a
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v0
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: s_branch .LBB45_5
+; SI-NEXT: s_branch .LBB45_4
; SI-NEXT: .LBB45_3:
-; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB45_2
-; SI-NEXT: .LBB45_4:
; SI-NEXT: v_mov_b32_e32 v0, s29
; SI-NEXT: v_mov_b32_e32 v1, s28
; SI-NEXT: v_mov_b32_e32 v2, s27
@@ -10452,7 +10103,7 @@ define inreg <16 x bfloat> @bitcast_v8f32_to_v16bf16_scalar(<8 x float> inreg %a
; SI-NEXT: v_mov_b32_e32 v10, s8
; SI-NEXT: v_mov_b32_e32 v8, s7
; SI-NEXT: v_mov_b32_e32 v9, s6
-; SI-NEXT: .LBB45_5: ; %end
+; SI-NEXT: .LBB45_4: ; %end
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
@@ -10491,9 +10142,9 @@ define inreg <16 x bfloat> @bitcast_v8f32_to_v16bf16_scalar(<8 x float> inreg %a
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB45_3
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB45_4
+; VI-NEXT: s_cbranch_execnz .LBB45_3
; VI-NEXT: .LBB45_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -10505,8 +10156,6 @@ define inreg <16 x bfloat> @bitcast_v8f32_to_v16bf16_scalar(<8 x float> inreg %a
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB45_3:
-; VI-NEXT: s_branch .LBB45_2
-; VI-NEXT: .LBB45_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -10521,9 +10170,9 @@ define inreg <16 x bfloat> @bitcast_v8f32_to_v16bf16_scalar(<8 x float> inreg %a
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB45_4
+; GFX9-NEXT: s_cbranch_execnz .LBB45_3
; GFX9-NEXT: .LBB45_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -10535,8 +10184,6 @@ define inreg <16 x bfloat> @bitcast_v8f32_to_v16bf16_scalar(<8 x float> inreg %a
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB45_3:
-; GFX9-NEXT: s_branch .LBB45_2
-; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -10556,10 +10203,10 @@ define inreg <16 x bfloat> @bitcast_v8f32_to_v16bf16_scalar(<8 x float> inreg %a
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
; GFX11-NEXT: .LBB45_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v7, s7, 1.0
; GFX11-NEXT: v_add_f32_e64 v6, s6, 1.0
@@ -10571,8 +10218,6 @@ define inreg <16 x bfloat> @bitcast_v8f32_to_v16bf16_scalar(<8 x float> inreg %a
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB45_3:
-; GFX11-NEXT: s_branch .LBB45_2
-; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -11376,7 +11021,7 @@ define inreg <8 x float> @bitcast_v16bf16_to_v8f32_scalar(<16 x bfloat> inreg %a
; SI-NEXT: v_mul_f32_e64 v25, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v11, 1.0, s7
; SI-NEXT: v_mul_f32_e64 v9, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB47_4
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v32
; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v31
@@ -11446,17 +11091,14 @@ define inreg <8 x float> @bitcast_v16bf16_to_v8f32_scalar(<16 x bfloat> inreg %a
; SI-NEXT: v_lshr_b64 v[7:8], v[7:8], 16
; SI-NEXT: .LBB47_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB47_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB47_2
;
; VI-LABEL: bitcast_v16bf16_to_v8f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
+; VI-NEXT: s_cbranch_execnz .LBB47_3
; VI-NEXT: .LBB47_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s23, 16
; VI-NEXT: v_mov_b32_e32 v8, 0x40c00000
@@ -11608,8 +11250,6 @@ define inreg <8 x float> @bitcast_v16bf16_to_v8f32_scalar(<16 x bfloat> inreg %a
; VI-NEXT: v_mov_b32_e32 v5, v8
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
-; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -11624,9 +11264,9 @@ define inreg <8 x float> @bitcast_v16bf16_to_v8f32_scalar(<16 x bfloat> inreg %a
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
+; GFX9-NEXT: s_cbranch_execnz .LBB47_3
; GFX9-NEXT: .LBB47_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s23, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -11784,8 +11424,6 @@ define inreg <8 x float> @bitcast_v16bf16_to_v8f32_scalar(<16 x bfloat> inreg %a
; GFX9-NEXT: v_lshl_or_b32 v0, v9, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
-; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -11805,10 +11443,10 @@ define inreg <8 x float> @bitcast_v16bf16_to_v8f32_scalar(<16 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: s_mov_b32 s4, s16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s8, s7, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
@@ -11969,8 +11607,6 @@ define inreg <8 x float> @bitcast_v16bf16_to_v8f32_scalar(<16 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB47_2
-; GFX11-TRUE16-NEXT: .LBB47_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -11986,10 +11622,10 @@ define inreg <8 x float> @bitcast_v16bf16_to_v8f32_scalar(<16 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: s_mov_b32 s4, s16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-FAKE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s7, 16
; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff0000
@@ -12167,8 +11803,6 @@ define inreg <8 x float> @bitcast_v16bf16_to_v8f32_scalar(<16 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v12, 16, v9
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB47_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB47_2
-; GFX11-FAKE16-NEXT: .LBB47_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -12737,7 +12371,7 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB49_3
+; SI-NEXT: s_cbranch_scc0 .LBB49_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s72, s23, 24
; SI-NEXT: s_lshr_b32 s74, s23, 16
@@ -12763,7 +12397,7 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16
; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8
-; SI-NEXT: s_cbranch_execnz .LBB49_4
+; SI-NEXT: s_cbranch_execnz .LBB49_3
; SI-NEXT: .LBB49_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v39, s17, 1.0
; SI-NEXT: v_add_f32_e64 v38, s16, 1.0
@@ -12797,34 +12431,8 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v39
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v39
; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v39
-; SI-NEXT: s_branch .LBB49_5
+; SI-NEXT: s_branch .LBB49_4
; SI-NEXT: .LBB49_3:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: s_branch .LBB49_2
-; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v38, s16
; SI-NEXT: v_mov_b32_e32 v39, s17
; SI-NEXT: v_mov_b32_e32 v34, s18
@@ -12857,7 +12465,7 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v3, s4
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_mov_b32_e32 v1, s8
-; SI-NEXT: .LBB49_5: ; %end
+; SI-NEXT: .LBB49_4: ; %end
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: v_mov_b32_e32 v0, v38
; SI-NEXT: v_mov_b32_e32 v4, v39
@@ -12876,7 +12484,7 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB49_3
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s23, 24
; VI-NEXT: s_lshr_b32 s15, s23, 16
@@ -12902,7 +12510,7 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; VI-NEXT: s_lshr_b64 s[8:9], s[20:21], 24
; VI-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB49_4
+; VI-NEXT: s_cbranch_execnz .LBB49_3
; VI-NEXT: .LBB49_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
@@ -12936,34 +12544,8 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; VI-NEXT: v_lshrrev_b32_e32 v5, 8, v1
; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; VI-NEXT: v_lshrrev_b32_e32 v35, 8, v0
-; VI-NEXT: s_branch .LBB49_5
+; VI-NEXT: s_branch .LBB49_4
; VI-NEXT: .LBB49_3:
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr29
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr28
-; VI-NEXT: ; implicit-def: $sgpr27
-; VI-NEXT: ; implicit-def: $sgpr26
-; VI-NEXT: ; implicit-def: $sgpr24
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr25
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB49_2
-; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v8, s18
@@ -12996,7 +12578,7 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v19, s8
; VI-NEXT: v_mov_b32_e32 v11, s6
; VI-NEXT: v_mov_b32_e32 v3, s4
-; VI-NEXT: .LBB49_5: ; %end
+; VI-NEXT: .LBB49_4: ; %end
; VI-NEXT: v_mov_b32_e32 v4, v1
; VI-NEXT: v_mov_b32_e32 v12, v9
; VI-NEXT: v_mov_b32_e32 v20, v17
@@ -13011,7 +12593,7 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s14, s23, 24
; GFX9-NEXT: s_lshr_b32 s15, s23, 16
@@ -13037,7 +12619,7 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; GFX9-NEXT: s_lshr_b64 s[8:9], s[20:21], 24
; GFX9-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
+; GFX9-NEXT: s_cbranch_execnz .LBB49_3
; GFX9-NEXT: .LBB49_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
@@ -13071,34 +12653,8 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 8, v0
-; GFX9-NEXT: s_branch .LBB49_5
+; GFX9-NEXT: s_branch .LBB49_4
; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr29
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr28
-; GFX9-NEXT: ; implicit-def: $sgpr27
-; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: ; implicit-def: $sgpr24
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr25
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB49_2
-; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v8, s18
@@ -13131,7 +12687,7 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v19, s8
; GFX9-NEXT: v_mov_b32_e32 v11, s6
; GFX9-NEXT: v_mov_b32_e32 v3, s4
-; GFX9-NEXT: .LBB49_5: ; %end
+; GFX9-NEXT: .LBB49_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v4, v1
; GFX9-NEXT: v_mov_b32_e32 v12, v9
; GFX9-NEXT: v_mov_b32_e32 v20, v17
@@ -13147,7 +12703,7 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s12, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s13, s19, 24
; GFX11-NEXT: s_lshr_b32 s14, s19, 16
@@ -13174,7 +12730,7 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v39, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v37, s3, 1.0
@@ -13208,34 +12764,8 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v39
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v38
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v38
-; GFX11-NEXT: s_branch .LBB49_5
+; GFX11-NEXT: s_branch .LBB49_4
; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr29
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr27
-; GFX11-NEXT: ; implicit-def: $sgpr26
-; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr25
-; GFX11-NEXT: ; implicit-def: $sgpr23
-; GFX11-NEXT: ; implicit-def: $sgpr22
-; GFX11-NEXT: ; implicit-def: $sgpr21
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr20
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: s_branch .LBB49_2
-; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_dual_mov_b32 v38, s0 :: v_dual_mov_b32 v39, s1
; GFX11-NEXT: v_dual_mov_b32 v36, s2 :: v_dual_mov_b32 v37, s3
; GFX11-NEXT: v_dual_mov_b32 v34, s16 :: v_dual_mov_b32 v35, s17
@@ -13256,7 +12786,7 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; GFX11-NEXT: v_mov_b32_e32 v19, s8
; GFX11-NEXT: v_mov_b32_e32 v11, s6
; GFX11-NEXT: v_mov_b32_e32 v3, s4
-; GFX11-NEXT: .LBB49_5: ; %end
+; GFX11-NEXT: .LBB49_4: ; %end
; GFX11-NEXT: v_mov_b32_e32 v0, v38
; GFX11-NEXT: v_mov_b32_e32 v4, v39
; GFX11-NEXT: v_mov_b32_e32 v8, v36
@@ -14251,7 +13781,7 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s62, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s63, v0
-; SI-NEXT: s_cbranch_scc0 .LBB51_4
+; SI-NEXT: s_cbranch_scc0 .LBB51_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -14441,9 +13971,6 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s10
; SI-NEXT: v_mov_b32_e32 v7, s11
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB51_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11
-; SI-NEXT: s_branch .LBB51_2
;
; VI-LABEL: bitcast_v32i8_to_v8f32_scalar:
; VI: ; %bb.0:
@@ -14468,7 +13995,7 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB51_4
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v7, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -14604,9 +14131,6 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v7
; VI-NEXT: .LBB51_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; VI-NEXT: s_branch .LBB51_2
;
; GFX9-LABEL: bitcast_v32i8_to_v8f32_scalar:
; GFX9: ; %bb.0:
@@ -14631,7 +14155,7 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s46, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s47, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v7, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -14760,9 +14284,6 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
; GFX9-NEXT: v_or_b32_sdwa v7, v8, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB51_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB51_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX9-NEXT: s_branch .LBB51_2
;
; GFX11-LABEL: bitcast_v32i8_to_v8f32_scalar:
; GFX11: ; %bb.0:
@@ -14784,7 +14305,7 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
; GFX11-NEXT: v_readfirstlane_b32 s41, v0
; GFX11-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-NEXT: s_mov_b32 s42, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v3, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -14898,9 +14419,6 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
; GFX11-NEXT: v_or_b32_e32 v7, v12, v13
; GFX11-NEXT: .LBB51_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB51_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-NEXT: s_branch .LBB51_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15024,7 +14542,7 @@ define inreg <4 x double> @bitcast_v4i64_to_v4f64_scalar(<4 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB53_4
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB53_3
; SI-NEXT: .LBB53_2: ; %cmp.true
@@ -15046,14 +14564,12 @@ define inreg <4 x double> @bitcast_v4i64_to_v4f64_scalar(<4 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB53_4:
-; SI-NEXT: s_branch .LBB53_2
;
; VI-LABEL: bitcast_v4i64_to_v4f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB53_4
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB53_3
; VI-NEXT: .LBB53_2: ; %cmp.true
@@ -15075,14 +14591,12 @@ define inreg <4 x double> @bitcast_v4i64_to_v4f64_scalar(<4 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_4:
-; VI-NEXT: s_branch .LBB53_2
;
; GFX9-LABEL: bitcast_v4i64_to_v4f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB53_3
; GFX9-NEXT: .LBB53_2: ; %cmp.true
@@ -15104,15 +14618,13 @@ define inreg <4 x double> @bitcast_v4i64_to_v4f64_scalar(<4 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB53_4:
-; GFX9-NEXT: s_branch .LBB53_2
;
; GFX11-LABEL: bitcast_v4i64_to_v4f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
@@ -15131,8 +14643,6 @@ define inreg <4 x double> @bitcast_v4i64_to_v4f64_scalar(<4 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15240,9 +14750,9 @@ define inreg <4 x i64> @bitcast_v4f64_to_v4i64_scalar(<4 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB55_4
+; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
@@ -15250,8 +14760,6 @@ define inreg <4 x i64> @bitcast_v4f64_to_v4i64_scalar(<4 x double> inreg %a, i32
; SI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB55_3:
-; SI-NEXT: s_branch .LBB55_2
-; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -15266,9 +14774,9 @@ define inreg <4 x i64> @bitcast_v4f64_to_v4i64_scalar(<4 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_4
+; VI-NEXT: s_cbranch_execnz .LBB55_3
; VI-NEXT: .LBB55_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
@@ -15276,8 +14784,6 @@ define inreg <4 x i64> @bitcast_v4f64_to_v4i64_scalar(<4 x double> inreg %a, i32
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB55_3:
-; VI-NEXT: s_branch .LBB55_2
-; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -15292,9 +14798,9 @@ define inreg <4 x i64> @bitcast_v4f64_to_v4i64_scalar(<4 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
+; GFX9-NEXT: s_cbranch_execnz .LBB55_3
; GFX9-NEXT: .LBB55_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
@@ -15302,8 +14808,6 @@ define inreg <4 x i64> @bitcast_v4f64_to_v4i64_scalar(<4 x double> inreg %a, i32
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
-; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -15323,10 +14827,10 @@ define inreg <4 x i64> @bitcast_v4f64_to_v4i64_scalar(<4 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-NEXT: .LBB55_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
@@ -15334,8 +14838,6 @@ define inreg <4 x i64> @bitcast_v4f64_to_v4i64_scalar(<4 x double> inreg %a, i32
; GFX11-NEXT: v_add_f64 v[6:7], s[6:7], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
-; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -15515,7 +15017,7 @@ define inreg <16 x i16> @bitcast_v4i64_to_v16i16_scalar(<4 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB57_4
+; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s14, s23, 16
; SI-NEXT: s_lshr_b32 s15, s21, 16
@@ -15577,22 +15079,12 @@ define inreg <16 x i16> @bitcast_v4i64_to_v16i16_scalar(<4 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s4
; SI-NEXT: v_mov_b32_e32 v7, s11
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB57_4:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: s_branch .LBB57_2
;
; VI-LABEL: bitcast_v4i64_to_v16i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB57_3
; VI-NEXT: .LBB57_2: ; %cmp.true
@@ -15614,14 +15106,12 @@ define inreg <16 x i16> @bitcast_v4i64_to_v16i16_scalar(<4 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v4i64_to_v16i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB57_3
; GFX9-NEXT: .LBB57_2: ; %cmp.true
@@ -15643,15 +15133,13 @@ define inreg <16 x i16> @bitcast_v4i64_to_v16i16_scalar(<4 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB57_4:
-; GFX9-NEXT: s_branch .LBB57_2
;
; GFX11-LABEL: bitcast_v4i64_to_v16i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB57_3
@@ -15671,8 +15159,6 @@ define inreg <16 x i16> @bitcast_v4i64_to_v16i16_scalar(<4 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB57_4:
-; GFX11-NEXT: s_branch .LBB57_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15910,7 +15396,7 @@ define inreg <4 x i64> @bitcast_v16i16_to_v4i64_scalar(<16 x i16> inreg %a, i32
; SI-NEXT: s_lshr_b32 s29, s17, 16
; SI-NEXT: s_lshr_b32 s40, s16, 16
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB59_4
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s40, 16
@@ -15988,15 +15474,12 @@ define inreg <4 x i64> @bitcast_v16i16_to_v4i64_scalar(<16 x i16> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s10
; SI-NEXT: v_mov_b32_e32 v7, s11
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB59_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11
-; SI-NEXT: s_branch .LBB59_2
;
; VI-LABEL: bitcast_v16i16_to_v4i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_4
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB59_3
; VI-NEXT: .LBB59_2: ; %cmp.true
@@ -16050,16 +15533,14 @@ define inreg <4 x i64> @bitcast_v16i16_to_v4i64_scalar(<16 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB59_4:
-; VI-NEXT: s_branch .LBB59_2
;
; GFX9-LABEL: bitcast_v16i16_to_v4i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
+; GFX9-NEXT: s_cbranch_execnz .LBB59_3
; GFX9-NEXT: .LBB59_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
@@ -16071,8 +15552,6 @@ define inreg <4 x i64> @bitcast_v16i16_to_v4i64_scalar(<16 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
-; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -16092,10 +15571,10 @@ define inreg <4 x i64> @bitcast_v16i16_to_v4i64_scalar(<16 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-NEXT: .LBB59_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
@@ -16107,8 +15586,6 @@ define inreg <4 x i64> @bitcast_v16i16_to_v4i64_scalar(<16 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
-; GFX11-NEXT: .LBB59_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -16288,7 +15765,7 @@ define inreg <16 x half> @bitcast_v4i64_to_v16f16_scalar(<4 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB61_4
+; SI-NEXT: s_cbranch_scc0 .LBB61_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s14, s23, 16
; SI-NEXT: s_lshr_b32 s15, s21, 16
@@ -16350,22 +15827,12 @@ define inreg <16 x half> @bitcast_v4i64_to_v16f16_scalar(<4 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s4
; SI-NEXT: v_mov_b32_e32 v7, s11
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB61_4:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: s_branch .LBB61_2
;
; VI-LABEL: bitcast_v4i64_to_v16f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB61_4
+; VI-NEXT: s_cbranch_scc0 .LBB61_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB61_3
; VI-NEXT: .LBB61_2: ; %cmp.true
@@ -16387,14 +15854,12 @@ define inreg <16 x half> @bitcast_v4i64_to_v16f16_scalar(<4 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB61_4:
-; VI-NEXT: s_branch .LBB61_2
;
; GFX9-LABEL: bitcast_v4i64_to_v16f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB61_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB61_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB61_3
; GFX9-NEXT: .LBB61_2: ; %cmp.true
@@ -16416,15 +15881,13 @@ define inreg <16 x half> @bitcast_v4i64_to_v16f16_scalar(<4 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB61_4:
-; GFX9-NEXT: s_branch .LBB61_2
;
; GFX11-LABEL: bitcast_v4i64_to_v16f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB61_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB61_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB61_3
@@ -16444,8 +15907,6 @@ define inreg <16 x half> @bitcast_v4i64_to_v16f16_scalar(<4 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB61_4:
-; GFX11-NEXT: s_branch .LBB61_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16715,7 +16176,7 @@ define inreg <4 x i64> @bitcast_v16f16_to_v4i64_scalar(<16 x half> inreg %a, i32
; SI-NEXT: s_lshr_b32 s29, s17, 16
; SI-NEXT: s_lshr_b32 s40, s16, 16
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB63_3
+; SI-NEXT: s_cbranch_scc0 .LBB63_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s40, 16
@@ -16741,7 +16202,7 @@ define inreg <4 x i64> @bitcast_v16f16_to_v4i64_scalar(<16 x half> inreg %a, i32
; SI-NEXT: s_and_b32 s11, s23, 0xffff
; SI-NEXT: s_lshl_b32 s12, s14, 16
; SI-NEXT: s_or_b32 s11, s11, s12
-; SI-NEXT: s_cbranch_execnz .LBB63_4
+; SI-NEXT: s_cbranch_execnz .LBB63_3
; SI-NEXT: .LBB63_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s40
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -16809,9 +16270,6 @@ define inreg <4 x i64> @bitcast_v16f16_to_v4i64_scalar(<16 x half> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v7, v9, v7
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB63_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11
-; SI-NEXT: s_branch .LBB63_2
-; SI-NEXT: .LBB63_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -16826,9 +16284,9 @@ define inreg <4 x i64> @bitcast_v16f16_to_v4i64_scalar(<16 x half> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB63_3
+; VI-NEXT: s_cbranch_scc0 .LBB63_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB63_4
+; VI-NEXT: s_cbranch_execnz .LBB63_3
; VI-NEXT: .LBB63_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s23, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -16873,8 +16331,6 @@ define inreg <4 x i64> @bitcast_v16f16_to_v4i64_scalar(<16 x half> inreg %a, i32
; VI-NEXT: v_or_b32_e32 v0, v0, v8
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB63_3:
-; VI-NEXT: s_branch .LBB63_2
-; VI-NEXT: .LBB63_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -16889,9 +16345,9 @@ define inreg <4 x i64> @bitcast_v16f16_to_v4i64_scalar(<16 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB63_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB63_4
+; GFX9-NEXT: s_cbranch_execnz .LBB63_3
; GFX9-NEXT: .LBB63_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v7, s23, v0 op_sel_hi:[1,0]
@@ -16904,8 +16360,6 @@ define inreg <4 x i64> @bitcast_v16f16_to_v4i64_scalar(<16 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB63_3:
-; GFX9-NEXT: s_branch .LBB63_2
-; GFX9-NEXT: .LBB63_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -16925,10 +16379,10 @@ define inreg <4 x i64> @bitcast_v16f16_to_v4i64_scalar(<16 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB63_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB63_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB63_3
; GFX11-NEXT: .LBB63_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
@@ -16940,8 +16394,6 @@ define inreg <4 x i64> @bitcast_v16f16_to_v4i64_scalar(<16 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB63_3:
-; GFX11-NEXT: s_branch .LBB63_2
-; GFX11-NEXT: .LBB63_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -17162,7 +16614,7 @@ define inreg <16 x bfloat> @bitcast_v4i64_to_v16bf16_scalar(<4 x i64> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB65_4
+; SI-NEXT: s_cbranch_scc0 .LBB65_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s7, s23, 0xffff0000
; SI-NEXT: s_lshl_b32 s6, s23, 16
@@ -17240,30 +16692,12 @@ define inreg <16 x bfloat> @bitcast_v4i64_to_v16bf16_scalar(<4 x i64> inreg %a,
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s6
; SI-NEXT: v_lshr_b64 v[7:8], v[7:8], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB65_4:
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB65_2
;
; VI-LABEL: bitcast_v4i64_to_v16bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB65_4
+; VI-NEXT: s_cbranch_scc0 .LBB65_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB65_3
; VI-NEXT: .LBB65_2: ; %cmp.true
@@ -17285,14 +16719,12 @@ define inreg <16 x bfloat> @bitcast_v4i64_to_v16bf16_scalar(<4 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB65_4:
-; VI-NEXT: s_branch .LBB65_2
;
; GFX9-LABEL: bitcast_v4i64_to_v16bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB65_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB65_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB65_3
; GFX9-NEXT: .LBB65_2: ; %cmp.true
@@ -17314,15 +16746,13 @@ define inreg <16 x bfloat> @bitcast_v4i64_to_v16bf16_scalar(<4 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB65_4:
-; GFX9-NEXT: s_branch .LBB65_2
;
; GFX11-LABEL: bitcast_v4i64_to_v16bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB65_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB65_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB65_3
@@ -17342,8 +16772,6 @@ define inreg <16 x bfloat> @bitcast_v4i64_to_v16bf16_scalar(<4 x i64> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB65_4:
-; GFX11-NEXT: s_branch .LBB65_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -18142,7 +17570,7 @@ define inreg <4 x i64> @bitcast_v16bf16_to_v4i64_scalar(<16 x bfloat> inreg %a,
; SI-NEXT: v_mul_f32_e64 v25, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v11, 1.0, s7
; SI-NEXT: v_mul_f32_e64 v9, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB67_4
+; SI-NEXT: s_cbranch_scc0 .LBB67_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v32
; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v31
@@ -18212,17 +17640,14 @@ define inreg <4 x i64> @bitcast_v16bf16_to_v4i64_scalar(<16 x bfloat> inreg %a,
; SI-NEXT: v_lshr_b64 v[7:8], v[7:8], 16
; SI-NEXT: .LBB67_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB67_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB67_2
;
; VI-LABEL: bitcast_v16bf16_to_v4i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB67_3
+; VI-NEXT: s_cbranch_scc0 .LBB67_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB67_4
+; VI-NEXT: s_cbranch_execnz .LBB67_3
; VI-NEXT: .LBB67_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s23, 16
; VI-NEXT: v_mov_b32_e32 v8, 0x40c00000
@@ -18374,8 +17799,6 @@ define inreg <4 x i64> @bitcast_v16bf16_to_v4i64_scalar(<16 x bfloat> inreg %a,
; VI-NEXT: v_mov_b32_e32 v5, v8
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB67_3:
-; VI-NEXT: s_branch .LBB67_2
-; VI-NEXT: .LBB67_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -18390,9 +17813,9 @@ define inreg <4 x i64> @bitcast_v16bf16_to_v4i64_scalar(<16 x bfloat> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB67_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB67_4
+; GFX9-NEXT: s_cbranch_execnz .LBB67_3
; GFX9-NEXT: .LBB67_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s23, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -18550,8 +17973,6 @@ define inreg <4 x i64> @bitcast_v16bf16_to_v4i64_scalar(<16 x bfloat> inreg %a,
; GFX9-NEXT: v_lshl_or_b32 v0, v9, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB67_3:
-; GFX9-NEXT: s_branch .LBB67_2
-; GFX9-NEXT: .LBB67_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -18571,10 +17992,10 @@ define inreg <4 x i64> @bitcast_v16bf16_to_v4i64_scalar(<16 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: s_mov_b32 s4, s16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB67_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB67_3
; GFX11-TRUE16-NEXT: .LBB67_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s8, s7, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
@@ -18735,8 +18156,6 @@ define inreg <4 x i64> @bitcast_v16bf16_to_v4i64_scalar(<16 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB67_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB67_2
-; GFX11-TRUE16-NEXT: .LBB67_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -18752,10 +18171,10 @@ define inreg <4 x i64> @bitcast_v16bf16_to_v4i64_scalar(<16 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: s_mov_b32 s4, s16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB67_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB67_3
; GFX11-FAKE16-NEXT: .LBB67_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s7, 16
; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff0000
@@ -18933,8 +18352,6 @@ define inreg <4 x i64> @bitcast_v16bf16_to_v4i64_scalar(<16 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v12, 16, v9
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB67_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB67_2
-; GFX11-FAKE16-NEXT: .LBB67_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -19513,7 +18930,7 @@ define inreg <32 x i8> @bitcast_v4i64_to_v32i8_scalar(<4 x i64> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB69_4
+; SI-NEXT: s_cbranch_scc0 .LBB69_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s56, s23, 24
; SI-NEXT: s_lshr_b32 s57, s23, 16
@@ -19607,38 +19024,12 @@ define inreg <32 x i8> @bitcast_v4i64_to_v32i8_scalar(<4 x i64> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v30, s57
; SI-NEXT: v_mov_b32_e32 v31, s56
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB69_4:
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: s_branch .LBB69_2
;
; VI-LABEL: bitcast_v4i64_to_v32i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB69_4
+; VI-NEXT: s_cbranch_scc0 .LBB69_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s23, 24
; VI-NEXT: s_lshr_b32 s15, s23, 16
@@ -19732,38 +19123,12 @@ define inreg <32 x i8> @bitcast_v4i64_to_v32i8_scalar(<4 x i64> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v30, s15
; VI-NEXT: v_mov_b32_e32 v31, s14
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB69_4:
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr29
-; VI-NEXT: ; implicit-def: $sgpr28
-; VI-NEXT: ; implicit-def: $sgpr27
-; VI-NEXT: ; implicit-def: $sgpr26
-; VI-NEXT: ; implicit-def: $sgpr25
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr24
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB69_2
;
; GFX9-LABEL: bitcast_v4i64_to_v32i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB69_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB69_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s14, s23, 24
; GFX9-NEXT: s_lshr_b32 s15, s23, 16
@@ -19857,39 +19222,13 @@ define inreg <32 x i8> @bitcast_v4i64_to_v32i8_scalar(<4 x i64> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v30, s15
; GFX9-NEXT: v_mov_b32_e32 v31, s14
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB69_4:
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr29
-; GFX9-NEXT: ; implicit-def: $sgpr28
-; GFX9-NEXT: ; implicit-def: $sgpr27
-; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: ; implicit-def: $sgpr25
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr24
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB69_2
;
; GFX11-LABEL: bitcast_v4i64_to_v32i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s46, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB69_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB69_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s12, s19, 24
; GFX11-NEXT: s_lshr_b32 s13, s19, 16
@@ -19969,32 +19308,6 @@ define inreg <32 x i8> @bitcast_v4i64_to_v32i8_scalar(<4 x i64> inreg %a, i32 in
; GFX11-NEXT: v_dual_mov_b32 v28, s19 :: v_dual_mov_b32 v29, s14
; GFX11-NEXT: v_dual_mov_b32 v30, s13 :: v_dual_mov_b32 v31, s12
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB69_4:
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr29
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr27
-; GFX11-NEXT: ; implicit-def: $sgpr26
-; GFX11-NEXT: ; implicit-def: $sgpr25
-; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr23
-; GFX11-NEXT: ; implicit-def: $sgpr22
-; GFX11-NEXT: ; implicit-def: $sgpr21
-; GFX11-NEXT: ; implicit-def: $sgpr20
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: s_branch .LBB69_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -20980,7 +20293,7 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
; SI-NEXT: v_readfirstlane_b32 s62, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s63, v0
-; SI-NEXT: s_cbranch_scc0 .LBB71_4
+; SI-NEXT: s_cbranch_scc0 .LBB71_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -21170,9 +20483,6 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v6, s10
; SI-NEXT: v_mov_b32_e32 v7, s11
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB71_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11
-; SI-NEXT: s_branch .LBB71_2
;
; VI-LABEL: bitcast_v32i8_to_v4i64_scalar:
; VI: ; %bb.0:
@@ -21197,7 +20507,7 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB71_4
+; VI-NEXT: s_cbranch_scc0 .LBB71_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v7, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -21333,9 +20643,6 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v7
; VI-NEXT: .LBB71_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB71_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; VI-NEXT: s_branch .LBB71_2
;
; GFX9-LABEL: bitcast_v32i8_to_v4i64_scalar:
; GFX9: ; %bb.0:
@@ -21360,7 +20667,7 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
; GFX9-NEXT: v_readfirstlane_b32 s46, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s47, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB71_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB71_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v7, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -21489,9 +20796,6 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
; GFX9-NEXT: v_or_b32_sdwa v7, v8, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB71_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB71_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX9-NEXT: s_branch .LBB71_2
;
; GFX11-LABEL: bitcast_v32i8_to_v4i64_scalar:
; GFX11: ; %bb.0:
@@ -21513,7 +20817,7 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
; GFX11-NEXT: v_readfirstlane_b32 s41, v0
; GFX11-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-NEXT: s_mov_b32 s42, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB71_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB71_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v3, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -21627,9 +20931,6 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
; GFX11-NEXT: v_or_b32_e32 v7, v12, v13
; GFX11-NEXT: .LBB71_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB71_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-NEXT: s_branch .LBB71_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -21788,7 +21089,7 @@ define inreg <16 x i16> @bitcast_v4f64_to_v16i16_scalar(<4 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB73_3
+; SI-NEXT: s_cbranch_scc0 .LBB73_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s25, s23, 16
; SI-NEXT: s_lshr_b32 s24, s21, 16
@@ -21798,7 +21099,7 @@ define inreg <16 x i16> @bitcast_v4f64_to_v16i16_scalar(<4 x double> inreg %a, i
; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB73_4
+; SI-NEXT: s_cbranch_execnz .LBB73_3
; SI-NEXT: .LBB73_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -21812,18 +21113,8 @@ define inreg <16 x i16> @bitcast_v4f64_to_v16i16_scalar(<4 x double> inreg %a, i
; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v1
-; SI-NEXT: s_branch .LBB73_5
+; SI-NEXT: s_branch .LBB73_4
; SI-NEXT: .LBB73_3:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: s_branch .LBB73_2
-; SI-NEXT: .LBB73_4:
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v5, s21
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -21840,7 +21131,7 @@ define inreg <16 x i16> @bitcast_v4f64_to_v16i16_scalar(<4 x double> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v10, s8
; SI-NEXT: v_mov_b32_e32 v9, s6
; SI-NEXT: v_mov_b32_e32 v8, s4
-; SI-NEXT: .LBB73_5: ; %end
+; SI-NEXT: .LBB73_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -21871,9 +21162,9 @@ define inreg <16 x i16> @bitcast_v4f64_to_v16i16_scalar(<4 x double> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB73_3
+; VI-NEXT: s_cbranch_scc0 .LBB73_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB73_4
+; VI-NEXT: s_cbranch_execnz .LBB73_3
; VI-NEXT: .LBB73_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -21881,8 +21172,6 @@ define inreg <16 x i16> @bitcast_v4f64_to_v16i16_scalar(<4 x double> inreg %a, i
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB73_3:
-; VI-NEXT: s_branch .LBB73_2
-; VI-NEXT: .LBB73_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -21897,9 +21186,9 @@ define inreg <16 x i16> @bitcast_v4f64_to_v16i16_scalar(<4 x double> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB73_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB73_4
+; GFX9-NEXT: s_cbranch_execnz .LBB73_3
; GFX9-NEXT: .LBB73_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -21907,8 +21196,6 @@ define inreg <16 x i16> @bitcast_v4f64_to_v16i16_scalar(<4 x double> inreg %a, i
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB73_3:
-; GFX9-NEXT: s_branch .LBB73_2
-; GFX9-NEXT: .LBB73_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -21928,10 +21215,10 @@ define inreg <16 x i16> @bitcast_v4f64_to_v16i16_scalar(<4 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB73_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB73_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB73_3
; GFX11-NEXT: .LBB73_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[6:7], s[6:7], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
@@ -21939,8 +21226,6 @@ define inreg <16 x i16> @bitcast_v4f64_to_v16i16_scalar(<4 x double> inreg %a, i
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB73_3:
-; GFX11-NEXT: s_branch .LBB73_2
-; GFX11-NEXT: .LBB73_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -22183,7 +21468,7 @@ define inreg <4 x double> @bitcast_v16i16_to_v4f64_scalar(<16 x i16> inreg %a, i
; SI-NEXT: s_lshr_b32 s29, s17, 16
; SI-NEXT: s_lshr_b32 s40, s16, 16
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB75_4
+; SI-NEXT: s_cbranch_scc0 .LBB75_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s40, 16
@@ -22261,15 +21546,12 @@ define inreg <4 x double> @bitcast_v16i16_to_v4f64_scalar(<16 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v6, s10
; SI-NEXT: v_mov_b32_e32 v7, s11
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB75_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11
-; SI-NEXT: s_branch .LBB75_2
;
; VI-LABEL: bitcast_v16i16_to_v4f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB75_4
+; VI-NEXT: s_cbranch_scc0 .LBB75_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB75_3
; VI-NEXT: .LBB75_2: ; %cmp.true
@@ -22323,16 +21605,14 @@ define inreg <4 x double> @bitcast_v16i16_to_v4f64_scalar(<16 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB75_4:
-; VI-NEXT: s_branch .LBB75_2
;
; GFX9-LABEL: bitcast_v16i16_to_v4f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB75_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB75_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB75_4
+; GFX9-NEXT: s_cbranch_execnz .LBB75_3
; GFX9-NEXT: .LBB75_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
@@ -22344,8 +21624,6 @@ define inreg <4 x double> @bitcast_v16i16_to_v4f64_scalar(<16 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB75_3:
-; GFX9-NEXT: s_branch .LBB75_2
-; GFX9-NEXT: .LBB75_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -22365,10 +21643,10 @@ define inreg <4 x double> @bitcast_v16i16_to_v4f64_scalar(<16 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB75_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB75_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB75_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB75_3
; GFX11-NEXT: .LBB75_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
@@ -22380,8 +21658,6 @@ define inreg <4 x double> @bitcast_v16i16_to_v4f64_scalar(<16 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB75_3:
-; GFX11-NEXT: s_branch .LBB75_2
-; GFX11-NEXT: .LBB75_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -22545,7 +21821,7 @@ define inreg <16 x half> @bitcast_v4f64_to_v16f16_scalar(<4 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB77_3
+; SI-NEXT: s_cbranch_scc0 .LBB77_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s25, s23, 16
; SI-NEXT: s_lshr_b32 s24, s21, 16
@@ -22555,7 +21831,7 @@ define inreg <16 x half> @bitcast_v4f64_to_v16f16_scalar(<4 x double> inreg %a,
; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB77_4
+; SI-NEXT: s_cbranch_execnz .LBB77_3
; SI-NEXT: .LBB77_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -22569,18 +21845,8 @@ define inreg <16 x half> @bitcast_v4f64_to_v16f16_scalar(<4 x double> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v1
-; SI-NEXT: s_branch .LBB77_5
+; SI-NEXT: s_branch .LBB77_4
; SI-NEXT: .LBB77_3:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: s_branch .LBB77_2
-; SI-NEXT: .LBB77_4:
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v5, s21
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -22597,7 +21863,7 @@ define inreg <16 x half> @bitcast_v4f64_to_v16f16_scalar(<4 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s8
; SI-NEXT: v_mov_b32_e32 v9, s6
; SI-NEXT: v_mov_b32_e32 v8, s4
-; SI-NEXT: .LBB77_5: ; %end
+; SI-NEXT: .LBB77_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -22628,9 +21894,9 @@ define inreg <16 x half> @bitcast_v4f64_to_v16f16_scalar(<4 x double> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB77_3
+; VI-NEXT: s_cbranch_scc0 .LBB77_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB77_4
+; VI-NEXT: s_cbranch_execnz .LBB77_3
; VI-NEXT: .LBB77_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -22638,8 +21904,6 @@ define inreg <16 x half> @bitcast_v4f64_to_v16f16_scalar(<4 x double> inreg %a,
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB77_3:
-; VI-NEXT: s_branch .LBB77_2
-; VI-NEXT: .LBB77_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -22654,9 +21918,9 @@ define inreg <16 x half> @bitcast_v4f64_to_v16f16_scalar(<4 x double> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB77_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB77_4
+; GFX9-NEXT: s_cbranch_execnz .LBB77_3
; GFX9-NEXT: .LBB77_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -22664,8 +21928,6 @@ define inreg <16 x half> @bitcast_v4f64_to_v16f16_scalar(<4 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB77_3:
-; GFX9-NEXT: s_branch .LBB77_2
-; GFX9-NEXT: .LBB77_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -22685,10 +21947,10 @@ define inreg <16 x half> @bitcast_v4f64_to_v16f16_scalar(<4 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB77_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB77_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB77_3
; GFX11-NEXT: .LBB77_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[6:7], s[6:7], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
@@ -22696,8 +21958,6 @@ define inreg <16 x half> @bitcast_v4f64_to_v16f16_scalar(<4 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB77_3:
-; GFX11-NEXT: s_branch .LBB77_2
-; GFX11-NEXT: .LBB77_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -22972,7 +22232,7 @@ define inreg <4 x double> @bitcast_v16f16_to_v4f64_scalar(<16 x half> inreg %a,
; SI-NEXT: s_lshr_b32 s29, s17, 16
; SI-NEXT: s_lshr_b32 s40, s16, 16
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB79_3
+; SI-NEXT: s_cbranch_scc0 .LBB79_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s40, 16
@@ -22998,7 +22258,7 @@ define inreg <4 x double> @bitcast_v16f16_to_v4f64_scalar(<16 x half> inreg %a,
; SI-NEXT: s_and_b32 s11, s23, 0xffff
; SI-NEXT: s_lshl_b32 s12, s14, 16
; SI-NEXT: s_or_b32 s11, s11, s12
-; SI-NEXT: s_cbranch_execnz .LBB79_4
+; SI-NEXT: s_cbranch_execnz .LBB79_3
; SI-NEXT: .LBB79_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s40
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -23066,9 +22326,6 @@ define inreg <4 x double> @bitcast_v16f16_to_v4f64_scalar(<16 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v7, v9, v7
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB79_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11
-; SI-NEXT: s_branch .LBB79_2
-; SI-NEXT: .LBB79_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -23083,9 +22340,9 @@ define inreg <4 x double> @bitcast_v16f16_to_v4f64_scalar(<16 x half> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB79_3
+; VI-NEXT: s_cbranch_scc0 .LBB79_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB79_4
+; VI-NEXT: s_cbranch_execnz .LBB79_3
; VI-NEXT: .LBB79_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s23, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -23130,8 +22387,6 @@ define inreg <4 x double> @bitcast_v16f16_to_v4f64_scalar(<16 x half> inreg %a,
; VI-NEXT: v_or_b32_e32 v0, v0, v8
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB79_3:
-; VI-NEXT: s_branch .LBB79_2
-; VI-NEXT: .LBB79_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -23146,9 +22401,9 @@ define inreg <4 x double> @bitcast_v16f16_to_v4f64_scalar(<16 x half> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB79_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB79_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB79_4
+; GFX9-NEXT: s_cbranch_execnz .LBB79_3
; GFX9-NEXT: .LBB79_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v7, s23, v0 op_sel_hi:[1,0]
@@ -23161,8 +22416,6 @@ define inreg <4 x double> @bitcast_v16f16_to_v4f64_scalar(<16 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB79_3:
-; GFX9-NEXT: s_branch .LBB79_2
-; GFX9-NEXT: .LBB79_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -23182,10 +22435,10 @@ define inreg <4 x double> @bitcast_v16f16_to_v4f64_scalar(<16 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB79_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB79_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB79_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB79_3
; GFX11-NEXT: .LBB79_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
@@ -23197,8 +22450,6 @@ define inreg <4 x double> @bitcast_v16f16_to_v4f64_scalar(<16 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB79_3:
-; GFX11-NEXT: s_branch .LBB79_2
-; GFX11-NEXT: .LBB79_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -23399,7 +22650,7 @@ define inreg <16 x bfloat> @bitcast_v4f64_to_v16bf16_scalar(<4 x double> inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB81_3
+; SI-NEXT: s_cbranch_scc0 .LBB81_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s29, s23, 0xffff0000
; SI-NEXT: s_lshl_b32 s28, s23, 16
@@ -23417,7 +22668,7 @@ define inreg <16 x bfloat> @bitcast_v4f64_to_v16bf16_scalar(<4 x double> inreg %
; SI-NEXT: s_lshl_b32 s8, s17, 16
; SI-NEXT: s_and_b32 s7, s16, 0xffff0000
; SI-NEXT: s_lshl_b32 s6, s16, 16
-; SI-NEXT: s_cbranch_execnz .LBB81_4
+; SI-NEXT: s_cbranch_execnz .LBB81_3
; SI-NEXT: .LBB81_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
@@ -23439,26 +22690,8 @@ define inreg <16 x bfloat> @bitcast_v4f64_to_v16bf16_scalar(<4 x double> inreg %
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v0
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: s_branch .LBB81_5
+; SI-NEXT: s_branch .LBB81_4
; SI-NEXT: .LBB81_3:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: s_branch .LBB81_2
-; SI-NEXT: .LBB81_4:
; SI-NEXT: v_mov_b32_e32 v9, s29
; SI-NEXT: v_mov_b32_e32 v8, s28
; SI-NEXT: v_mov_b32_e32 v10, s27
@@ -23475,7 +22708,7 @@ define inreg <16 x bfloat> @bitcast_v4f64_to_v16bf16_scalar(<4 x double> inreg %
; SI-NEXT: v_mov_b32_e32 v2, s8
; SI-NEXT: v_mov_b32_e32 v1, s7
; SI-NEXT: v_mov_b32_e32 v0, s6
-; SI-NEXT: .LBB81_5: ; %end
+; SI-NEXT: .LBB81_4: ; %end
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
@@ -23514,9 +22747,9 @@ define inreg <16 x bfloat> @bitcast_v4f64_to_v16bf16_scalar(<4 x double> inreg %
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB81_3
+; VI-NEXT: s_cbranch_scc0 .LBB81_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB81_4
+; VI-NEXT: s_cbranch_execnz .LBB81_3
; VI-NEXT: .LBB81_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -23524,8 +22757,6 @@ define inreg <16 x bfloat> @bitcast_v4f64_to_v16bf16_scalar(<4 x double> inreg %
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB81_3:
-; VI-NEXT: s_branch .LBB81_2
-; VI-NEXT: .LBB81_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -23540,9 +22771,9 @@ define inreg <16 x bfloat> @bitcast_v4f64_to_v16bf16_scalar(<4 x double> inreg %
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB81_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB81_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB81_4
+; GFX9-NEXT: s_cbranch_execnz .LBB81_3
; GFX9-NEXT: .LBB81_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -23550,8 +22781,6 @@ define inreg <16 x bfloat> @bitcast_v4f64_to_v16bf16_scalar(<4 x double> inreg %
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB81_3:
-; GFX9-NEXT: s_branch .LBB81_2
-; GFX9-NEXT: .LBB81_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -23571,10 +22800,10 @@ define inreg <16 x bfloat> @bitcast_v4f64_to_v16bf16_scalar(<4 x double> inreg %
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB81_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB81_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB81_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB81_3
; GFX11-NEXT: .LBB81_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[6:7], s[6:7], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
@@ -23582,8 +22811,6 @@ define inreg <16 x bfloat> @bitcast_v4f64_to_v16bf16_scalar(<4 x double> inreg %
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB81_3:
-; GFX11-NEXT: s_branch .LBB81_2
-; GFX11-NEXT: .LBB81_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -24387,7 +23614,7 @@ define inreg <4 x double> @bitcast_v16bf16_to_v4f64_scalar(<16 x bfloat> inreg %
; SI-NEXT: v_mul_f32_e64 v25, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v11, 1.0, s7
; SI-NEXT: v_mul_f32_e64 v9, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB83_4
+; SI-NEXT: s_cbranch_scc0 .LBB83_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v32
; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v31
@@ -24457,17 +23684,14 @@ define inreg <4 x double> @bitcast_v16bf16_to_v4f64_scalar(<16 x bfloat> inreg %
; SI-NEXT: v_lshr_b64 v[7:8], v[7:8], 16
; SI-NEXT: .LBB83_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB83_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB83_2
;
; VI-LABEL: bitcast_v16bf16_to_v4f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB83_3
+; VI-NEXT: s_cbranch_scc0 .LBB83_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB83_4
+; VI-NEXT: s_cbranch_execnz .LBB83_3
; VI-NEXT: .LBB83_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s23, 16
; VI-NEXT: v_mov_b32_e32 v8, 0x40c00000
@@ -24619,8 +23843,6 @@ define inreg <4 x double> @bitcast_v16bf16_to_v4f64_scalar(<16 x bfloat> inreg %
; VI-NEXT: v_mov_b32_e32 v5, v8
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB83_3:
-; VI-NEXT: s_branch .LBB83_2
-; VI-NEXT: .LBB83_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -24635,9 +23857,9 @@ define inreg <4 x double> @bitcast_v16bf16_to_v4f64_scalar(<16 x bfloat> inreg %
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB83_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB83_4
+; GFX9-NEXT: s_cbranch_execnz .LBB83_3
; GFX9-NEXT: .LBB83_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s23, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -24795,8 +24017,6 @@ define inreg <4 x double> @bitcast_v16bf16_to_v4f64_scalar(<16 x bfloat> inreg %
; GFX9-NEXT: v_lshl_or_b32 v0, v9, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB83_3:
-; GFX9-NEXT: s_branch .LBB83_2
-; GFX9-NEXT: .LBB83_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -24816,10 +24036,10 @@ define inreg <4 x double> @bitcast_v16bf16_to_v4f64_scalar(<16 x bfloat> inreg %
; GFX11-TRUE16-NEXT: s_mov_b32 s4, s16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB83_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB83_3
; GFX11-TRUE16-NEXT: .LBB83_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s8, s7, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
@@ -24980,8 +24200,6 @@ define inreg <4 x double> @bitcast_v16bf16_to_v4f64_scalar(<16 x bfloat> inreg %
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB83_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB83_2
-; GFX11-TRUE16-NEXT: .LBB83_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -24997,10 +24215,10 @@ define inreg <4 x double> @bitcast_v16bf16_to_v4f64_scalar(<16 x bfloat> inreg %
; GFX11-FAKE16-NEXT: s_mov_b32 s4, s16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB83_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB83_3
; GFX11-FAKE16-NEXT: .LBB83_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s7, 16
; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff0000
@@ -25178,8 +24396,6 @@ define inreg <4 x double> @bitcast_v16bf16_to_v4f64_scalar(<16 x bfloat> inreg %
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v12, 16, v9
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB83_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB83_2
-; GFX11-FAKE16-NEXT: .LBB83_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -25742,7 +24958,7 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB85_3
+; SI-NEXT: s_cbranch_scc0 .LBB85_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s75, s23, 24
; SI-NEXT: s_lshr_b32 s74, s23, 16
@@ -25768,7 +24984,7 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 24
; SI-NEXT: s_lshr_b64 s[42:43], s[16:17], 16
; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 8
-; SI-NEXT: s_cbranch_execnz .LBB85_4
+; SI-NEXT: s_cbranch_execnz .LBB85_3
; SI-NEXT: .LBB85_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[50:51], s[22:23], 1.0
; SI-NEXT: v_add_f64 v[37:38], s[20:21], 1.0
@@ -25798,34 +25014,8 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v49
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v49
; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v49
-; SI-NEXT: s_branch .LBB85_5
+; SI-NEXT: s_branch .LBB85_4
; SI-NEXT: .LBB85_3:
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: s_branch .LBB85_2
-; SI-NEXT: .LBB85_4:
; SI-NEXT: v_mov_b32_e32 v51, s23
; SI-NEXT: v_mov_b32_e32 v38, s21
; SI-NEXT: v_mov_b32_e32 v36, s19
@@ -25858,7 +25048,7 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v25, s8
; SI-NEXT: v_mov_b32_e32 v24, s6
; SI-NEXT: v_mov_b32_e32 v27, s4
-; SI-NEXT: .LBB85_5: ; %end
+; SI-NEXT: .LBB85_4: ; %end
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: v_mov_b32_e32 v0, v48
; SI-NEXT: v_mov_b32_e32 v4, v49
@@ -25877,7 +25067,7 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB85_3
+; VI-NEXT: s_cbranch_scc0 .LBB85_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s43, s23, 24
; VI-NEXT: s_lshr_b32 s42, s23, 16
@@ -25903,7 +25093,7 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; VI-NEXT: s_lshr_b64 s[8:9], s[20:21], 24
; VI-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB85_4
+; VI-NEXT: s_cbranch_execnz .LBB85_3
; VI-NEXT: .LBB85_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: v_add_f64 v[8:9], s[18:19], 1.0
@@ -25933,34 +25123,8 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; VI-NEXT: v_lshrrev_b32_e32 v5, 8, v1
; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; VI-NEXT: v_lshrrev_b32_e32 v35, 8, v0
-; VI-NEXT: s_branch .LBB85_5
+; VI-NEXT: s_branch .LBB85_4
; VI-NEXT: .LBB85_3:
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr24
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr25
-; VI-NEXT: ; implicit-def: $sgpr26
-; VI-NEXT: ; implicit-def: $sgpr27
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr28
-; VI-NEXT: ; implicit-def: $sgpr29
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: s_branch .LBB85_2
-; VI-NEXT: .LBB85_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v8, s18
; VI-NEXT: v_mov_b32_e32 v16, s20
@@ -25993,7 +25157,7 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v11, s6
; VI-NEXT: v_mov_b32_e32 v19, s8
; VI-NEXT: v_mov_b32_e32 v27, s10
-; VI-NEXT: .LBB85_5: ; %end
+; VI-NEXT: .LBB85_4: ; %end
; VI-NEXT: v_mov_b32_e32 v4, v1
; VI-NEXT: v_mov_b32_e32 v12, v9
; VI-NEXT: v_mov_b32_e32 v20, v17
@@ -26008,7 +25172,7 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB85_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB85_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s43, s23, 24
; GFX9-NEXT: s_lshr_b32 s42, s23, 16
@@ -26034,7 +25198,7 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; GFX9-NEXT: s_lshr_b64 s[8:9], s[20:21], 24
; GFX9-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB85_4
+; GFX9-NEXT: s_cbranch_execnz .LBB85_3
; GFX9-NEXT: .LBB85_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[8:9], s[18:19], 1.0
@@ -26064,34 +25228,8 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 8, v0
-; GFX9-NEXT: s_branch .LBB85_5
+; GFX9-NEXT: s_branch .LBB85_4
; GFX9-NEXT: .LBB85_3:
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr24
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr25
-; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: ; implicit-def: $sgpr27
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr28
-; GFX9-NEXT: ; implicit-def: $sgpr29
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: s_branch .LBB85_2
-; GFX9-NEXT: .LBB85_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v8, s18
; GFX9-NEXT: v_mov_b32_e32 v16, s20
@@ -26124,7 +25262,7 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v11, s6
; GFX9-NEXT: v_mov_b32_e32 v19, s8
; GFX9-NEXT: v_mov_b32_e32 v27, s10
-; GFX9-NEXT: .LBB85_5: ; %end
+; GFX9-NEXT: .LBB85_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v4, v1
; GFX9-NEXT: v_mov_b32_e32 v12, v9
; GFX9-NEXT: v_mov_b32_e32 v20, v17
@@ -26140,7 +25278,7 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s12, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB85_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB85_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s28, s19, 24
; GFX11-NEXT: s_lshr_b32 s27, s19, 16
@@ -26167,7 +25305,7 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-NEXT: s_cbranch_vccnz .LBB85_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB85_3
; GFX11-NEXT: .LBB85_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[32:33], s[18:19], 1.0
; GFX11-NEXT: v_add_f64 v[34:35], s[16:17], 1.0
@@ -26199,34 +25337,8 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v39
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v38
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v38
-; GFX11-NEXT: s_branch .LBB85_5
+; GFX11-NEXT: s_branch .LBB85_4
; GFX11-NEXT: .LBB85_3:
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr20
-; GFX11-NEXT: ; implicit-def: $sgpr21
-; GFX11-NEXT: ; implicit-def: $sgpr22
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr23
-; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr25
-; GFX11-NEXT: ; implicit-def: $sgpr29
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr26
-; GFX11-NEXT: ; implicit-def: $sgpr27
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: s_branch .LBB85_2
-; GFX11-NEXT: .LBB85_4:
; GFX11-NEXT: v_dual_mov_b32 v38, s0 :: v_dual_mov_b32 v33, s19
; GFX11-NEXT: v_dual_mov_b32 v36, s2 :: v_dual_mov_b32 v35, s17
; GFX11-NEXT: v_dual_mov_b32 v34, s16 :: v_dual_mov_b32 v37, s3
@@ -26247,7 +25359,7 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; GFX11-NEXT: v_mov_b32_e32 v13, s20
; GFX11-NEXT: v_mov_b32_e32 v7, s15
; GFX11-NEXT: v_mov_b32_e32 v5, s13
-; GFX11-NEXT: .LBB85_5: ; %end
+; GFX11-NEXT: .LBB85_4: ; %end
; GFX11-NEXT: v_mov_b32_e32 v0, v38
; GFX11-NEXT: v_mov_b32_e32 v4, v39
; GFX11-NEXT: v_mov_b32_e32 v8, v36
@@ -27242,7 +26354,7 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s62, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s63, v0
-; SI-NEXT: s_cbranch_scc0 .LBB87_4
+; SI-NEXT: s_cbranch_scc0 .LBB87_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -27432,9 +26544,6 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s10
; SI-NEXT: v_mov_b32_e32 v7, s11
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB87_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11
-; SI-NEXT: s_branch .LBB87_2
;
; VI-LABEL: bitcast_v32i8_to_v4f64_scalar:
; VI: ; %bb.0:
@@ -27459,7 +26568,7 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s46, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s47, v0
-; VI-NEXT: s_cbranch_scc0 .LBB87_4
+; VI-NEXT: s_cbranch_scc0 .LBB87_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v7, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -27595,9 +26704,6 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v7
; VI-NEXT: .LBB87_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB87_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; VI-NEXT: s_branch .LBB87_2
;
; GFX9-LABEL: bitcast_v32i8_to_v4f64_scalar:
; GFX9: ; %bb.0:
@@ -27622,7 +26728,7 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s46, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s47, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB87_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB87_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v7, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -27751,9 +26857,6 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
; GFX9-NEXT: v_or_b32_sdwa v7, v8, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB87_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB87_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX9-NEXT: s_branch .LBB87_2
;
; GFX11-LABEL: bitcast_v32i8_to_v4f64_scalar:
; GFX11: ; %bb.0:
@@ -27775,7 +26878,7 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
; GFX11-NEXT: v_readfirstlane_b32 s41, v0
; GFX11-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-NEXT: s_mov_b32 s42, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB87_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB87_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v3, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -27889,9 +26992,6 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
; GFX11-NEXT: v_or_b32_e32 v7, v12, v13
; GFX11-NEXT: .LBB87_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB87_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-NEXT: s_branch .LBB87_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -28164,7 +27264,7 @@ define inreg <16 x half> @bitcast_v16i16_to_v16f16_scalar(<16 x i16> inreg %a, i
; SI-NEXT: s_lshr_b32 s40, s17, 16
; SI-NEXT: s_lshr_b32 s44, s16, 16
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB89_4
+; SI-NEXT: s_cbranch_scc0 .LBB89_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s5, s17, 0xffff
; SI-NEXT: s_lshl_b32 s7, s40, 16
@@ -28282,22 +27382,12 @@ define inreg <16 x half> @bitcast_v16i16_to_v16f16_scalar(<16 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v6, s4
; SI-NEXT: v_mov_b32_e32 v7, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB89_4:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: s_branch .LBB89_2
;
; VI-LABEL: bitcast_v16i16_to_v16f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB89_4
+; VI-NEXT: s_cbranch_scc0 .LBB89_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB89_3
; VI-NEXT: .LBB89_2: ; %cmp.true
@@ -28351,16 +27441,14 @@ define inreg <16 x half> @bitcast_v16i16_to_v16f16_scalar(<16 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB89_4:
-; VI-NEXT: s_branch .LBB89_2
;
; GFX9-LABEL: bitcast_v16i16_to_v16f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB89_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB89_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB89_4
+; GFX9-NEXT: s_cbranch_execnz .LBB89_3
; GFX9-NEXT: .LBB89_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
@@ -28372,8 +27460,6 @@ define inreg <16 x half> @bitcast_v16i16_to_v16f16_scalar(<16 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB89_3:
-; GFX9-NEXT: s_branch .LBB89_2
-; GFX9-NEXT: .LBB89_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -28393,10 +27479,10 @@ define inreg <16 x half> @bitcast_v16i16_to_v16f16_scalar(<16 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB89_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB89_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB89_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB89_3
; GFX11-NEXT: .LBB89_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
@@ -28408,8 +27494,6 @@ define inreg <16 x half> @bitcast_v16i16_to_v16f16_scalar(<16 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB89_3:
-; GFX11-NEXT: s_branch .LBB89_2
-; GFX11-NEXT: .LBB89_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -28656,9 +27740,9 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s6, s17, 16
; SI-NEXT: s_lshr_b32 s13, s16, 16
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB91_3
+; SI-NEXT: s_cbranch_scc0 .LBB91_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB91_4
+; SI-NEXT: s_cbranch_execnz .LBB91_3
; SI-NEXT: .LBB91_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s13
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -28728,10 +27812,8 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i
; SI-NEXT: v_lshr_b64 v[14:15], v[0:1], 16
; SI-NEXT: v_lshr_b64 v[12:13], v[2:3], 16
; SI-NEXT: v_lshr_b64 v[8:9], v[6:7], 16
-; SI-NEXT: s_branch .LBB91_5
+; SI-NEXT: s_branch .LBB91_4
; SI-NEXT: .LBB91_3:
-; SI-NEXT: s_branch .LBB91_2
-; SI-NEXT: .LBB91_4:
; SI-NEXT: v_mov_b32_e32 v17, s10
; SI-NEXT: v_mov_b32_e32 v21, s8
; SI-NEXT: v_mov_b32_e32 v22, s7
@@ -28748,7 +27830,7 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v12, s12
; SI-NEXT: v_mov_b32_e32 v10, s11
; SI-NEXT: v_mov_b32_e32 v8, s9
-; SI-NEXT: .LBB91_5: ; %end
+; SI-NEXT: .LBB91_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v14
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v16
; SI-NEXT: v_or_b32_e32 v0, v2, v0
@@ -28779,9 +27861,9 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB91_3
+; VI-NEXT: s_cbranch_scc0 .LBB91_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB91_4
+; VI-NEXT: s_cbranch_execnz .LBB91_3
; VI-NEXT: .LBB91_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -28826,8 +27908,6 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v8, v9
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB91_3:
-; VI-NEXT: s_branch .LBB91_2
-; VI-NEXT: .LBB91_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -28842,9 +27922,9 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB91_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB91_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB91_4
+; GFX9-NEXT: s_cbranch_execnz .LBB91_3
; GFX9-NEXT: .LBB91_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v7, s23, v0 op_sel_hi:[1,0]
@@ -28857,8 +27937,6 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB91_3:
-; GFX9-NEXT: s_branch .LBB91_2
-; GFX9-NEXT: .LBB91_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -28878,10 +27956,10 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB91_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB91_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB91_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB91_3
; GFX11-NEXT: .LBB91_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
@@ -28893,8 +27971,6 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB91_3:
-; GFX11-NEXT: s_branch .LBB91_2
-; GFX11-NEXT: .LBB91_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -29163,7 +28239,7 @@ define inreg <16 x bfloat> @bitcast_v16i16_to_v16bf16_scalar(<16 x i16> inreg %a
; SI-NEXT: s_lshr_b32 s41, s17, 16
; SI-NEXT: s_lshr_b32 s40, s16, 16
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB93_4
+; SI-NEXT: s_cbranch_scc0 .LBB93_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s25, s16, 16
; SI-NEXT: s_lshl_b32 s29, s40, 16
@@ -29273,30 +28349,12 @@ define inreg <16 x bfloat> @bitcast_v16i16_to_v16bf16_scalar(<16 x i16> inreg %a
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s6
; SI-NEXT: v_lshr_b64 v[7:8], v[7:8], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB93_4:
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB93_2
;
; VI-LABEL: bitcast_v16i16_to_v16bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB93_4
+; VI-NEXT: s_cbranch_scc0 .LBB93_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB93_3
; VI-NEXT: .LBB93_2: ; %cmp.true
@@ -29350,16 +28408,14 @@ define inreg <16 x bfloat> @bitcast_v16i16_to_v16bf16_scalar(<16 x i16> inreg %a
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB93_4:
-; VI-NEXT: s_branch .LBB93_2
;
; GFX9-LABEL: bitcast_v16i16_to_v16bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB93_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB93_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB93_4
+; GFX9-NEXT: s_cbranch_execnz .LBB93_3
; GFX9-NEXT: .LBB93_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
@@ -29371,8 +28427,6 @@ define inreg <16 x bfloat> @bitcast_v16i16_to_v16bf16_scalar(<16 x i16> inreg %a
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB93_3:
-; GFX9-NEXT: s_branch .LBB93_2
-; GFX9-NEXT: .LBB93_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -29392,10 +28446,10 @@ define inreg <16 x bfloat> @bitcast_v16i16_to_v16bf16_scalar(<16 x i16> inreg %a
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB93_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB93_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB93_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB93_3
; GFX11-NEXT: .LBB93_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
@@ -29407,8 +28461,6 @@ define inreg <16 x bfloat> @bitcast_v16i16_to_v16bf16_scalar(<16 x i16> inreg %a
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB93_3:
-; GFX11-NEXT: s_branch .LBB93_2
-; GFX11-NEXT: .LBB93_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -30249,7 +29301,7 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a
; SI-NEXT: v_mul_f32_e64 v15, 1.0, s6
; SI-NEXT: v_mul_f32_e64 v19, 1.0, s5
; SI-NEXT: v_mul_f32_e64 v8, 1.0, s4
-; SI-NEXT: s_cbranch_scc0 .LBB95_4
+; SI-NEXT: s_cbranch_scc0 .LBB95_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v31
; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v1
@@ -30351,32 +29403,14 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v16
; SI-NEXT: v_or_b32_e32 v7, v7, v8
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB95_4:
-; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: ; implicit-def: $vgpr23
-; SI-NEXT: ; implicit-def: $vgpr22
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr24
-; SI-NEXT: ; implicit-def: $vgpr20
-; SI-NEXT: ; implicit-def: $vgpr13
-; SI-NEXT: ; implicit-def: $vgpr25
-; SI-NEXT: ; implicit-def: $vgpr18
-; SI-NEXT: ; implicit-def: $vgpr14
-; SI-NEXT: ; implicit-def: $vgpr26
-; SI-NEXT: ; implicit-def: $vgpr16
-; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: s_branch .LBB95_2
;
; VI-LABEL: bitcast_v16bf16_to_v16i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB95_3
+; VI-NEXT: s_cbranch_scc0 .LBB95_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB95_4
+; VI-NEXT: s_cbranch_execnz .LBB95_3
; VI-NEXT: .LBB95_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v10, 0x40c00000
@@ -30529,8 +29563,6 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a
; VI-NEXT: v_mov_b32_e32 v7, v15
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB95_3:
-; VI-NEXT: s_branch .LBB95_2
-; VI-NEXT: .LBB95_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -30545,9 +29577,9 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB95_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB95_4
+; GFX9-NEXT: s_cbranch_execnz .LBB95_3
; GFX9-NEXT: .LBB95_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s16, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v1, 0x40c00000
@@ -30697,8 +29729,6 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a
; GFX9-NEXT: v_and_or_b32 v0, v0, v13, v8
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB95_3:
-; GFX9-NEXT: s_branch .LBB95_2
-; GFX9-NEXT: .LBB95_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -30718,10 +29748,10 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: s_mov_b32 s4, s16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB95_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB95_3
; GFX11-TRUE16-NEXT: .LBB95_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s8, s0, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -30860,8 +29890,6 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v17.h
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB95_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB95_2
-; GFX11-TRUE16-NEXT: .LBB95_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -30877,10 +29905,10 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: s_mov_b32 s4, s16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB95_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB95_3
; GFX11-FAKE16-NEXT: .LBB95_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_and_b32 s8, s0, 0xffff0000
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -31030,8 +30058,6 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v15
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB95_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB95_2
-; GFX11-FAKE16-NEXT: .LBB95_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -31715,7 +30741,7 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32
; SI-NEXT: s_lshr_b32 s72, s17, 16
; SI-NEXT: s_lshr_b32 s76, s16, 16
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB97_4
+; SI-NEXT: s_cbranch_scc0 .LBB97_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s76, 16
@@ -31861,38 +30887,12 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v30, s75
; SI-NEXT: v_mov_b32_e32 v31, s89
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB97_4:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: s_branch .LBB97_2
;
; VI-LABEL: bitcast_v16i16_to_v32i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB97_4
+; VI-NEXT: s_cbranch_scc0 .LBB97_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s23, 24
; VI-NEXT: s_lshr_b32 s15, s23, 16
@@ -32018,38 +31018,12 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v30, s15
; VI-NEXT: v_mov_b32_e32 v31, s14
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB97_4:
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr29
-; VI-NEXT: ; implicit-def: $sgpr28
-; VI-NEXT: ; implicit-def: $sgpr27
-; VI-NEXT: ; implicit-def: $sgpr26
-; VI-NEXT: ; implicit-def: $sgpr25
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr24
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB97_2
;
; GFX9-LABEL: bitcast_v16i16_to_v32i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB97_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB97_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s14, s23, 24
; GFX9-NEXT: s_lshr_b32 s15, s23, 16
@@ -32075,7 +31049,7 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32
; GFX9-NEXT: s_lshr_b64 s[8:9], s[20:21], 24
; GFX9-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB97_4
+; GFX9-NEXT: s_cbranch_execnz .LBB97_3
; GFX9-NEXT: .LBB97_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
@@ -32109,34 +31083,8 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 8, v0
-; GFX9-NEXT: s_branch .LBB97_5
+; GFX9-NEXT: s_branch .LBB97_4
; GFX9-NEXT: .LBB97_3:
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr29
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr28
-; GFX9-NEXT: ; implicit-def: $sgpr27
-; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: ; implicit-def: $sgpr24
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr25
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB97_2
-; GFX9-NEXT: .LBB97_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v8, s18
@@ -32169,7 +31117,7 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v19, s8
; GFX9-NEXT: v_mov_b32_e32 v11, s6
; GFX9-NEXT: v_mov_b32_e32 v3, s4
-; GFX9-NEXT: .LBB97_5: ; %end
+; GFX9-NEXT: .LBB97_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v4, v1
; GFX9-NEXT: v_mov_b32_e32 v12, v9
; GFX9-NEXT: v_mov_b32_e32 v20, v17
@@ -32185,7 +31133,7 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s12, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB97_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB97_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s13, s19, 24
; GFX11-NEXT: s_lshr_b32 s14, s19, 16
@@ -32212,7 +31160,7 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-NEXT: s_cbranch_vccnz .LBB97_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB97_3
; GFX11-NEXT: .LBB97_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v39, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v37, s3, 3 op_sel_hi:[1,0]
@@ -32246,34 +31194,8 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v39
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v38
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v38
-; GFX11-NEXT: s_branch .LBB97_5
+; GFX11-NEXT: s_branch .LBB97_4
; GFX11-NEXT: .LBB97_3:
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr29
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr27
-; GFX11-NEXT: ; implicit-def: $sgpr26
-; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr25
-; GFX11-NEXT: ; implicit-def: $sgpr23
-; GFX11-NEXT: ; implicit-def: $sgpr22
-; GFX11-NEXT: ; implicit-def: $sgpr21
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr20
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: s_branch .LBB97_2
-; GFX11-NEXT: .LBB97_4:
; GFX11-NEXT: v_dual_mov_b32 v38, s0 :: v_dual_mov_b32 v39, s1
; GFX11-NEXT: v_dual_mov_b32 v36, s2 :: v_dual_mov_b32 v37, s3
; GFX11-NEXT: v_dual_mov_b32 v34, s16 :: v_dual_mov_b32 v35, s17
@@ -32294,7 +31216,7 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32
; GFX11-NEXT: v_mov_b32_e32 v19, s8
; GFX11-NEXT: v_mov_b32_e32 v11, s6
; GFX11-NEXT: v_mov_b32_e32 v3, s4
-; GFX11-NEXT: .LBB97_5: ; %end
+; GFX11-NEXT: .LBB97_4: ; %end
; GFX11-NEXT: v_mov_b32_e32 v0, v38
; GFX11-NEXT: v_mov_b32_e32 v4, v39
; GFX11-NEXT: v_mov_b32_e32 v8, v36
@@ -33340,7 +32262,7 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s46, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s47, v0
-; SI-NEXT: s_cbranch_scc0 .LBB99_4
+; SI-NEXT: s_cbranch_scc0 .LBB99_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s8, s20, 0xff
; SI-NEXT: s_lshl_b32 s9, s21, 8
@@ -33574,20 +32496,6 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s8
; SI-NEXT: v_mov_b32_e32 v7, s9
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB99_4:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: s_branch .LBB99_2
;
; VI-LABEL: bitcast_v32i8_to_v16i16_scalar:
; VI: ; %bb.0:
@@ -33612,7 +32520,7 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s10, v0
-; VI-NEXT: s_cbranch_scc0 .LBB99_4
+; VI-NEXT: s_cbranch_scc0 .LBB99_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v7, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -33748,9 +32656,6 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v7
; VI-NEXT: .LBB99_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB99_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; VI-NEXT: s_branch .LBB99_2
;
; GFX9-LABEL: bitcast_v32i8_to_v16i16_scalar:
; GFX9: ; %bb.0:
@@ -33775,7 +32680,7 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s8, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB99_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB99_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v0, s17
; GFX9-NEXT: v_mov_b32_e32 v4, 0xc0c0004
@@ -33917,9 +32822,6 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; GFX9-NEXT: v_lshl_or_b32 v7, v15, 16, v7
; GFX9-NEXT: .LBB99_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB99_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX9-NEXT: s_branch .LBB99_2
;
; GFX11-TRUE16-LABEL: bitcast_v32i8_to_v16i16_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -33941,7 +32843,7 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s12, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB99_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB99_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, 0xc0c0004
; GFX11-TRUE16-NEXT: s_and_b32 s43, s28, 0xff
@@ -34046,9 +32948,6 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v9.l
; GFX11-TRUE16-NEXT: .LBB99_3: ; %end
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB99_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-TRUE16-NEXT: s_branch .LBB99_2
;
; GFX11-FAKE16-LABEL: bitcast_v32i8_to_v16i16_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -34070,7 +32969,7 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s9, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s42, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB99_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB99_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, 0xc0c0004
; GFX11-FAKE16-NEXT: s_and_b32 s43, s28, 0xff
@@ -34188,9 +33087,6 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v7, 16, v14
; GFX11-FAKE16-NEXT: .LBB99_3: ; %end
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB99_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-FAKE16-NEXT: s_branch .LBB99_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -34487,7 +33383,7 @@ define inreg <16 x bfloat> @bitcast_v16f16_to_v16bf16_scalar(<16 x half> inreg %
; SI-NEXT: s_lshr_b32 s7, s17, 16
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB101_3
+; SI-NEXT: s_cbranch_scc0 .LBB101_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s13, s16, 16
; SI-NEXT: s_lshl_b32 s15, s6, 16
@@ -34505,7 +33401,7 @@ define inreg <16 x bfloat> @bitcast_v16f16_to_v16bf16_scalar(<16 x half> inreg %
; SI-NEXT: s_lshl_b32 s45, s12, 16
; SI-NEXT: s_lshl_b32 s46, s23, 16
; SI-NEXT: s_lshl_b32 s47, s14, 16
-; SI-NEXT: s_cbranch_execnz .LBB101_4
+; SI-NEXT: s_cbranch_execnz .LBB101_3
; SI-NEXT: .LBB101_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s14
; SI-NEXT: v_cvt_f32_f16_e32 v1, s23
@@ -34571,26 +33467,8 @@ define inreg <16 x bfloat> @bitcast_v16f16_to_v16bf16_scalar(<16 x half> inreg %
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
-; SI-NEXT: s_branch .LBB101_5
+; SI-NEXT: s_branch .LBB101_4
; SI-NEXT: .LBB101_3:
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: s_branch .LBB101_2
-; SI-NEXT: .LBB101_4:
; SI-NEXT: v_mov_b32_e32 v9, s47
; SI-NEXT: v_mov_b32_e32 v8, s46
; SI-NEXT: v_mov_b32_e32 v10, s45
@@ -34607,7 +33485,7 @@ define inreg <16 x bfloat> @bitcast_v16f16_to_v16bf16_scalar(<16 x half> inreg %
; SI-NEXT: v_mov_b32_e32 v2, s24
; SI-NEXT: v_mov_b32_e32 v1, s15
; SI-NEXT: v_mov_b32_e32 v0, s13
-; SI-NEXT: .LBB101_5: ; %end
+; SI-NEXT: .LBB101_4: ; %end
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
@@ -34646,9 +33524,9 @@ define inreg <16 x bfloat> @bitcast_v16f16_to_v16bf16_scalar(<16 x half> inreg %
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB101_3
+; VI-NEXT: s_cbranch_scc0 .LBB101_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB101_4
+; VI-NEXT: s_cbranch_execnz .LBB101_3
; VI-NEXT: .LBB101_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -34693,8 +33571,6 @@ define inreg <16 x bfloat> @bitcast_v16f16_to_v16bf16_scalar(<16 x half> inreg %
; VI-NEXT: v_or_b32_e32 v0, v8, v9
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB101_3:
-; VI-NEXT: s_branch .LBB101_2
-; VI-NEXT: .LBB101_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -34709,9 +33585,9 @@ define inreg <16 x bfloat> @bitcast_v16f16_to_v16bf16_scalar(<16 x half> inreg %
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB101_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB101_4
+; GFX9-NEXT: s_cbranch_execnz .LBB101_3
; GFX9-NEXT: .LBB101_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v7, s23, v0 op_sel_hi:[1,0]
@@ -34724,8 +33600,6 @@ define inreg <16 x bfloat> @bitcast_v16f16_to_v16bf16_scalar(<16 x half> inreg %
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB101_3:
-; GFX9-NEXT: s_branch .LBB101_2
-; GFX9-NEXT: .LBB101_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -34745,10 +33619,10 @@ define inreg <16 x bfloat> @bitcast_v16f16_to_v16bf16_scalar(<16 x half> inreg %
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB101_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB101_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB101_3
; GFX11-NEXT: .LBB101_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
@@ -34760,8 +33634,6 @@ define inreg <16 x bfloat> @bitcast_v16f16_to_v16bf16_scalar(<16 x half> inreg %
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB101_3:
-; GFX11-NEXT: s_branch .LBB101_2
-; GFX11-NEXT: .LBB101_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -35620,7 +34492,7 @@ define inreg <16 x half> @bitcast_v16bf16_to_v16f16_scalar(<16 x bfloat> inreg %
; SI-NEXT: v_mul_f32_e64 v29, 1.0, s15
; SI-NEXT: v_mul_f32_e64 v27, 1.0, s11
; SI-NEXT: v_mul_f32_e64 v24, 1.0, s7
-; SI-NEXT: s_cbranch_scc0 .LBB103_4
+; SI-NEXT: s_cbranch_scc0 .LBB103_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v37
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v36
@@ -35730,28 +34602,14 @@ define inreg <16 x half> @bitcast_v16bf16_to_v16f16_scalar(<16 x bfloat> inreg %
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v16
; SI-NEXT: v_or_b32_e32 v7, v7, v8
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB103_4:
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: ; implicit-def: $vgpr13
-; SI-NEXT: ; implicit-def: $vgpr21
-; SI-NEXT: ; implicit-def: $vgpr33
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr22
-; SI-NEXT: ; implicit-def: $vgpr8
-; SI-NEXT: ; implicit-def: $vgpr25
-; SI-NEXT: ; implicit-def: $vgpr34
-; SI-NEXT: ; implicit-def: $vgpr10
-; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: s_branch .LBB103_2
;
; VI-LABEL: bitcast_v16bf16_to_v16f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB103_3
+; VI-NEXT: s_cbranch_scc0 .LBB103_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB103_4
+; VI-NEXT: s_cbranch_execnz .LBB103_3
; VI-NEXT: .LBB103_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v10, 0x40c00000
@@ -35904,8 +34762,6 @@ define inreg <16 x half> @bitcast_v16bf16_to_v16f16_scalar(<16 x bfloat> inreg %
; VI-NEXT: v_mov_b32_e32 v7, v15
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB103_3:
-; VI-NEXT: s_branch .LBB103_2
-; VI-NEXT: .LBB103_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -35920,9 +34776,9 @@ define inreg <16 x half> @bitcast_v16bf16_to_v16f16_scalar(<16 x bfloat> inreg %
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB103_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB103_4
+; GFX9-NEXT: s_cbranch_execnz .LBB103_3
; GFX9-NEXT: .LBB103_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s16, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v1, 0x40c00000
@@ -36080,8 +34936,6 @@ define inreg <16 x half> @bitcast_v16bf16_to_v16f16_scalar(<16 x bfloat> inreg %
; GFX9-NEXT: v_lshl_or_b32 v0, v16, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB103_3:
-; GFX9-NEXT: s_branch .LBB103_2
-; GFX9-NEXT: .LBB103_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -36101,10 +34955,10 @@ define inreg <16 x half> @bitcast_v16bf16_to_v16f16_scalar(<16 x bfloat> inreg %
; GFX11-TRUE16-NEXT: s_mov_b32 s4, s16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB103_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB103_3
; GFX11-TRUE16-NEXT: .LBB103_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s8, s0, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -36261,8 +35115,6 @@ define inreg <16 x half> @bitcast_v16bf16_to_v16f16_scalar(<16 x bfloat> inreg %
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v15.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB103_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB103_2
-; GFX11-TRUE16-NEXT: .LBB103_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -36278,10 +35130,10 @@ define inreg <16 x half> @bitcast_v16bf16_to_v16f16_scalar(<16 x bfloat> inreg %
; GFX11-FAKE16-NEXT: s_mov_b32 s4, s16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB103_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB103_3
; GFX11-FAKE16-NEXT: .LBB103_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_and_b32 s8, s0, 0xffff0000
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -36448,8 +35300,6 @@ define inreg <16 x half> @bitcast_v16bf16_to_v16f16_scalar(<16 x bfloat> inreg %
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v14
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB103_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB103_2
-; GFX11-FAKE16-NEXT: .LBB103_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -37140,7 +35990,7 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; SI-NEXT: s_lshr_b32 s72, s17, 16
; SI-NEXT: s_lshr_b32 s73, s16, 16
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB105_3
+; SI-NEXT: s_cbranch_scc0 .LBB105_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s73, 16
@@ -37186,7 +36036,7 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; SI-NEXT: s_bfe_u32 s25, s74, 0x80008
; SI-NEXT: s_bfe_u32 s29, s76, 0x80008
; SI-NEXT: s_bfe_u32 s41, s78, 0x80008
-; SI-NEXT: s_cbranch_execnz .LBB105_4
+; SI-NEXT: s_cbranch_execnz .LBB105_3
; SI-NEXT: .LBB105_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s79
; SI-NEXT: v_cvt_f32_f16_e32 v1, s22
@@ -37272,34 +36122,8 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; SI-NEXT: v_lshr_b64 v[3:4], v[48:49], 24
; SI-NEXT: v_lshr_b64 v[1:2], v[48:49], 8
; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v49
-; SI-NEXT: s_branch .LBB105_5
+; SI-NEXT: s_branch .LBB105_4
; SI-NEXT: .LBB105_3:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr5
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr24
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: s_branch .LBB105_2
-; SI-NEXT: .LBB105_4:
; SI-NEXT: v_mov_b32_e32 v30, s78
; SI-NEXT: v_mov_b32_e32 v22, s76
; SI-NEXT: v_mov_b32_e32 v14, s74
@@ -37332,7 +36156,7 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v27, s46
; SI-NEXT: v_mov_b32_e32 v24, s56
; SI-NEXT: v_mov_b32_e32 v25, s58
-; SI-NEXT: .LBB105_5: ; %end
+; SI-NEXT: .LBB105_4: ; %end
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: v_mov_b32_e32 v0, v48
; SI-NEXT: v_mov_b32_e32 v4, v49
@@ -37351,7 +36175,7 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB105_3
+; VI-NEXT: s_cbranch_scc0 .LBB105_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s40, s23, 24
; VI-NEXT: s_lshr_b32 s44, s23, 16
@@ -37377,7 +36201,7 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; VI-NEXT: s_lshr_b64 s[8:9], s[20:21], 24
; VI-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB105_4
+; VI-NEXT: s_cbranch_execnz .LBB105_3
; VI-NEXT: .LBB105_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v1, 0x200
@@ -37436,34 +36260,8 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; VI-NEXT: v_bfe_u32 v23, v22, 8, 8
; VI-NEXT: v_bfe_u32 v15, v14, 8, 8
; VI-NEXT: v_bfe_u32 v7, v6, 8, 8
-; VI-NEXT: s_branch .LBB105_5
+; VI-NEXT: s_branch .LBB105_4
; VI-NEXT: .LBB105_3:
-; VI-NEXT: ; implicit-def: $sgpr26
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr27
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr28
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr24
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr29
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr25
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: s_branch .LBB105_2
-; VI-NEXT: .LBB105_4:
; VI-NEXT: v_mov_b32_e32 v2, s59
; VI-NEXT: v_mov_b32_e32 v6, s58
; VI-NEXT: v_mov_b32_e32 v10, s57
@@ -37496,7 +36294,7 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v19, s8
; VI-NEXT: v_mov_b32_e32 v11, s6
; VI-NEXT: v_mov_b32_e32 v3, s4
-; VI-NEXT: .LBB105_5: ; %end
+; VI-NEXT: .LBB105_4: ; %end
; VI-NEXT: v_mov_b32_e32 v4, v35
; VI-NEXT: v_mov_b32_e32 v12, v34
; VI-NEXT: v_mov_b32_e32 v20, v33
@@ -37507,7 +36305,7 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB105_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB105_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s14, s23, 24
; GFX9-NEXT: s_lshr_b32 s15, s23, 16
@@ -37533,7 +36331,7 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; GFX9-NEXT: s_lshr_b64 s[8:9], s[20:21], 24
; GFX9-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB105_4
+; GFX9-NEXT: s_cbranch_execnz .LBB105_3
; GFX9-NEXT: .LBB105_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v2, 0x200
; GFX9-NEXT: v_pk_add_f16 v1, s17, v2 op_sel_hi:[1,0]
@@ -37568,34 +36366,8 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 8, v0
-; GFX9-NEXT: s_branch .LBB105_5
+; GFX9-NEXT: s_branch .LBB105_4
; GFX9-NEXT: .LBB105_3:
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr29
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr28
-; GFX9-NEXT: ; implicit-def: $sgpr27
-; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: ; implicit-def: $sgpr24
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr25
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB105_2
-; GFX9-NEXT: .LBB105_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v8, s18
@@ -37628,7 +36400,7 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v19, s8
; GFX9-NEXT: v_mov_b32_e32 v11, s6
; GFX9-NEXT: v_mov_b32_e32 v3, s4
-; GFX9-NEXT: .LBB105_5: ; %end
+; GFX9-NEXT: .LBB105_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v4, v1
; GFX9-NEXT: v_mov_b32_e32 v12, v9
; GFX9-NEXT: v_mov_b32_e32 v20, v17
@@ -37644,7 +36416,7 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s12, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB105_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB105_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s13, s19, 24
; GFX11-NEXT: s_lshr_b32 s14, s19, 16
@@ -37671,7 +36443,7 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-NEXT: s_cbranch_vccnz .LBB105_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB105_3
; GFX11-NEXT: .LBB105_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v39, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v37, 0x200, s3 op_sel_hi:[0,1]
@@ -37705,34 +36477,8 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v39
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v38
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v38
-; GFX11-NEXT: s_branch .LBB105_5
+; GFX11-NEXT: s_branch .LBB105_4
; GFX11-NEXT: .LBB105_3:
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr29
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr27
-; GFX11-NEXT: ; implicit-def: $sgpr26
-; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr25
-; GFX11-NEXT: ; implicit-def: $sgpr23
-; GFX11-NEXT: ; implicit-def: $sgpr22
-; GFX11-NEXT: ; implicit-def: $sgpr21
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr20
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: s_branch .LBB105_2
-; GFX11-NEXT: .LBB105_4:
; GFX11-NEXT: v_dual_mov_b32 v38, s0 :: v_dual_mov_b32 v39, s1
; GFX11-NEXT: v_dual_mov_b32 v36, s2 :: v_dual_mov_b32 v37, s3
; GFX11-NEXT: v_dual_mov_b32 v34, s16 :: v_dual_mov_b32 v35, s17
@@ -37753,7 +36499,7 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; GFX11-NEXT: v_mov_b32_e32 v19, s8
; GFX11-NEXT: v_mov_b32_e32 v11, s6
; GFX11-NEXT: v_mov_b32_e32 v3, s4
-; GFX11-NEXT: .LBB105_5: ; %end
+; GFX11-NEXT: .LBB105_4: ; %end
; GFX11-NEXT: v_mov_b32_e32 v0, v38
; GFX11-NEXT: v_mov_b32_e32 v4, v39
; GFX11-NEXT: v_mov_b32_e32 v8, v36
@@ -38799,7 +37545,7 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s46, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s47, v0
-; SI-NEXT: s_cbranch_scc0 .LBB107_4
+; SI-NEXT: s_cbranch_scc0 .LBB107_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s8, s20, 0xff
; SI-NEXT: s_lshl_b32 s9, s21, 8
@@ -39033,20 +37779,6 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s8
; SI-NEXT: v_mov_b32_e32 v7, s9
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB107_4:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: s_branch .LBB107_2
;
; VI-LABEL: bitcast_v32i8_to_v16f16_scalar:
; VI: ; %bb.0:
@@ -39071,7 +37803,7 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s10, v0
-; VI-NEXT: s_cbranch_scc0 .LBB107_4
+; VI-NEXT: s_cbranch_scc0 .LBB107_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v7, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -39207,9 +37939,6 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v7
; VI-NEXT: .LBB107_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB107_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; VI-NEXT: s_branch .LBB107_2
;
; GFX9-LABEL: bitcast_v32i8_to_v16f16_scalar:
; GFX9: ; %bb.0:
@@ -39234,7 +37963,7 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s8, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB107_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB107_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v0, s17
; GFX9-NEXT: v_mov_b32_e32 v4, 0xc0c0004
@@ -39376,9 +38105,6 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; GFX9-NEXT: v_lshl_or_b32 v7, v15, 16, v7
; GFX9-NEXT: .LBB107_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB107_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX9-NEXT: s_branch .LBB107_2
;
; GFX11-TRUE16-LABEL: bitcast_v32i8_to_v16f16_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -39400,7 +38126,7 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s12, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB107_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB107_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, 0xc0c0004
; GFX11-TRUE16-NEXT: s_and_b32 s43, s28, 0xff
@@ -39505,9 +38231,6 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v9.l
; GFX11-TRUE16-NEXT: .LBB107_3: ; %end
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB107_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-TRUE16-NEXT: s_branch .LBB107_2
;
; GFX11-FAKE16-LABEL: bitcast_v32i8_to_v16f16_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -39529,7 +38252,7 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s9, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s42, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB107_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB107_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, 0xc0c0004
; GFX11-FAKE16-NEXT: s_and_b32 s43, s28, 0xff
@@ -39647,9 +38370,6 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v7, 16, v14
; GFX11-FAKE16-NEXT: .LBB107_3: ; %end
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB107_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-FAKE16-NEXT: s_branch .LBB107_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -40901,7 +39621,7 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; SI-NEXT: v_mul_f32_e64 v58, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v24, 1.0, s7
; SI-NEXT: v_mul_f32_e64 v29, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB109_4
+; SI-NEXT: s_cbranch_scc0 .LBB109_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v47
; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v57
@@ -41040,38 +39760,12 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, v35
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB109_4:
-; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: ; implicit-def: $vgpr48
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr10
-; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: ; implicit-def: $vgpr36
-; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: ; implicit-def: $vgpr20
-; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: ; implicit-def: $vgpr33
-; SI-NEXT: ; implicit-def: $vgpr23
-; SI-NEXT: ; implicit-def: $vgpr35
-; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: ; implicit-def: $vgpr17
-; SI-NEXT: ; implicit-def: $vgpr34
-; SI-NEXT: ; implicit-def: $vgpr19
-; SI-NEXT: ; implicit-def: $vgpr25
-; SI-NEXT: ; implicit-def: $vgpr37
-; SI-NEXT: ; implicit-def: $vgpr27
-; SI-NEXT: ; implicit-def: $vgpr50
-; SI-NEXT: ; implicit-def: $vgpr53
-; SI-NEXT: ; implicit-def: $vgpr39
-; SI-NEXT: ; implicit-def: $vgpr42
-; SI-NEXT: s_branch .LBB109_2
;
; VI-LABEL: bitcast_v16bf16_to_v32i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB109_3
+; VI-NEXT: s_cbranch_scc0 .LBB109_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s57, s23, 24
; VI-NEXT: s_lshr_b32 s56, s23, 16
@@ -41097,7 +39791,7 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; VI-NEXT: s_lshr_b64 s[8:9], s[20:21], 24
; VI-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB109_4
+; VI-NEXT: s_cbranch_execnz .LBB109_3
; VI-NEXT: .LBB109_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v2, 0x40c00000
@@ -41272,34 +39966,8 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v5, 8, v4
; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v0
-; VI-NEXT: s_branch .LBB109_5
+; VI-NEXT: s_branch .LBB109_4
; VI-NEXT: .LBB109_3:
-; VI-NEXT: ; implicit-def: $sgpr25
-; VI-NEXT: ; implicit-def: $sgpr26
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr24
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr27
-; VI-NEXT: ; implicit-def: $sgpr28
-; VI-NEXT: ; implicit-def: $sgpr29
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: s_branch .LBB109_2
-; VI-NEXT: .LBB109_4:
; VI-NEXT: v_mov_b32_e32 v26, s59
; VI-NEXT: v_mov_b32_e32 v25, s58
; VI-NEXT: v_mov_b32_e32 v31, s57
@@ -41332,7 +40000,7 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; VI-NEXT: v_mov_b32_e32 v37, s8
; VI-NEXT: v_mov_b32_e32 v34, s6
; VI-NEXT: v_mov_b32_e32 v32, s4
-; VI-NEXT: .LBB109_5: ; %end
+; VI-NEXT: .LBB109_4: ; %end
; VI-NEXT: v_mov_b32_e32 v3, v32
; VI-NEXT: v_mov_b32_e32 v11, v34
; VI-NEXT: v_mov_b32_e32 v19, v37
@@ -41343,7 +40011,7 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB109_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s42, s23, 24
; GFX9-NEXT: s_lshr_b32 s59, s23, 16
@@ -41369,7 +40037,7 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; GFX9-NEXT: s_lshr_b64 s[8:9], s[20:21], 24
; GFX9-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB109_4
+; GFX9-NEXT: s_cbranch_execnz .LBB109_3
; GFX9-NEXT: .LBB109_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s17, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v5, 0x40c00000
@@ -41552,34 +40220,8 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v3
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v3
; GFX9-NEXT: v_lshrrev_b64 v[3:4], 24, v[3:4]
-; GFX9-NEXT: s_branch .LBB109_5
+; GFX9-NEXT: s_branch .LBB109_4
; GFX9-NEXT: .LBB109_3:
-; GFX9-NEXT: ; implicit-def: $sgpr25
-; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr29
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr27
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr24
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr28
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: s_branch .LBB109_2
-; GFX9-NEXT: .LBB109_4:
; GFX9-NEXT: v_mov_b32_e32 v24, s22
; GFX9-NEXT: v_mov_b32_e32 v32, s23
; GFX9-NEXT: v_mov_b32_e32 v30, s59
@@ -41612,7 +40254,7 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v19, s8
; GFX9-NEXT: v_mov_b32_e32 v11, s6
; GFX9-NEXT: v_mov_b32_e32 v3, s4
-; GFX9-NEXT: .LBB109_5: ; %end
+; GFX9-NEXT: .LBB109_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v4, v35
; GFX9-NEXT: v_mov_b32_e32 v12, v34
; GFX9-NEXT: v_mov_b32_e32 v20, v33
@@ -41624,7 +40266,7 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s12, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB109_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s27, s19, 24
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s19, 16
@@ -41651,7 +40293,7 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-TRUE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB109_3
; GFX11-TRUE16-NEXT: .LBB109_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s1, 16
; GFX11-TRUE16-NEXT: s_and_b32 s1, s1, 0xffff0000
@@ -41837,34 +40479,8 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 8, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX11-TRUE16-NEXT: s_branch .LBB109_5
+; GFX11-TRUE16-NEXT: s_branch .LBB109_4
; GFX11-TRUE16-NEXT: .LBB109_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr20
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr21
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr24
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr25
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr22
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr28
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr29
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr26
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr23
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr27
-; GFX11-TRUE16-NEXT: s_branch .LBB109_2
-; GFX11-TRUE16-NEXT: .LBB109_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v33, s17
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s19 :: v_dual_mov_b32 v35, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s46 :: v_dual_mov_b32 v25, s41
@@ -41883,7 +40499,7 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v19, s8
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v11, s6
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s4
-; GFX11-TRUE16-NEXT: .LBB109_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB109_4: ; %end
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v35
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v12, v34
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v20, v33
@@ -41895,7 +40511,7 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s12, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB109_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s27, s19, 24
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s19, 16
@@ -41922,7 +40538,7 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-FAKE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB109_3
; GFX11-FAKE16-NEXT: .LBB109_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s1, 16
; GFX11-FAKE16-NEXT: s_and_b32 s1, s1, 0xffff0000
@@ -42113,34 +40729,8 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 8, v2
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX11-FAKE16-NEXT: s_branch .LBB109_5
+; GFX11-FAKE16-NEXT: s_branch .LBB109_4
; GFX11-FAKE16-NEXT: .LBB109_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr20
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr21
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr24
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr25
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr22
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr28
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr29
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr26
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr23
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr27
-; GFX11-FAKE16-NEXT: s_branch .LBB109_2
-; GFX11-FAKE16-NEXT: .LBB109_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v33, s17
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s19 :: v_dual_mov_b32 v35, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s46 :: v_dual_mov_b32 v25, s41
@@ -42159,7 +40749,7 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v19, s8
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v11, s6
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, s4
-; GFX11-FAKE16-NEXT: .LBB109_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB109_4: ; %end
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v35
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v12, v34
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v20, v33
@@ -43205,7 +41795,7 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s6, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s7, v0
-; SI-NEXT: s_cbranch_scc0 .LBB111_4
+; SI-NEXT: s_cbranch_scc0 .LBB111_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s4, s4, 16
@@ -43427,24 +42017,6 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s75
; SI-NEXT: v_lshr_b64 v[7:8], v[7:8], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB111_4:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: s_branch .LBB111_2
;
; VI-LABEL: bitcast_v32i8_to_v16bf16_scalar:
; VI: ; %bb.0:
@@ -43469,7 +42041,7 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s10, v0
-; VI-NEXT: s_cbranch_scc0 .LBB111_4
+; VI-NEXT: s_cbranch_scc0 .LBB111_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v7, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -43605,9 +42177,6 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v7
; VI-NEXT: .LBB111_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB111_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; VI-NEXT: s_branch .LBB111_2
;
; GFX9-LABEL: bitcast_v32i8_to_v16bf16_scalar:
; GFX9: ; %bb.0:
@@ -43632,7 +42201,7 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s8, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB111_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB111_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v0, s17
; GFX9-NEXT: v_mov_b32_e32 v4, 0xc0c0004
@@ -43774,9 +42343,6 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; GFX9-NEXT: v_lshl_or_b32 v7, v15, 16, v7
; GFX9-NEXT: .LBB111_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB111_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX9-NEXT: s_branch .LBB111_2
;
; GFX11-TRUE16-LABEL: bitcast_v32i8_to_v16bf16_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -43798,7 +42364,7 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s12, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB111_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB111_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, 0xc0c0004
; GFX11-TRUE16-NEXT: s_and_b32 s43, s28, 0xff
@@ -43903,9 +42469,6 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v9.l
; GFX11-TRUE16-NEXT: .LBB111_3: ; %end
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB111_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-TRUE16-NEXT: s_branch .LBB111_2
;
; GFX11-FAKE16-LABEL: bitcast_v32i8_to_v16bf16_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -43927,7 +42490,7 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s9, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s42, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB111_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB111_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, 0xc0c0004
; GFX11-FAKE16-NEXT: s_and_b32 s43, s28, 0xff
@@ -44045,9 +42608,6 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v7, 16, v14
; GFX11-FAKE16-NEXT: .LBB111_3: ; %end
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB111_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-FAKE16-NEXT: s_branch .LBB111_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.288bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.288bit.ll
index 6656733d53e51..5abe9c3e4db5a 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.288bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.288bit.ll
@@ -114,7 +114,7 @@ define inreg <9 x float> @bitcast_v9i32_to_v9f32_scalar(<9 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s25, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -138,14 +138,12 @@ define inreg <9 x float> @bitcast_v9i32_to_v9f32_scalar(<9 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v9i32_to_v9f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -169,14 +167,12 @@ define inreg <9 x float> @bitcast_v9i32_to_v9f32_scalar(<9 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v9i32_to_v9f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -200,15 +196,13 @@ define inreg <9 x float> @bitcast_v9i32_to_v9f32_scalar(<9 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v9i32_to_v9f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -230,8 +224,6 @@ define inreg <9 x float> @bitcast_v9i32_to_v9f32_scalar(<9 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_mov_b32_e32 v8, s20
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -352,9 +344,9 @@ define inreg <9 x i32> @bitcast_v9f32_to_v9i32_scalar(<9 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s25, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v8, s24, 1.0
; SI-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -367,8 +359,6 @@ define inreg <9 x i32> @bitcast_v9f32_to_v9i32_scalar(<9 x float> inreg %a, i32
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -384,9 +374,9 @@ define inreg <9 x i32> @bitcast_v9f32_to_v9i32_scalar(<9 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -399,8 +389,6 @@ define inreg <9 x i32> @bitcast_v9f32_to_v9i32_scalar(<9 x float> inreg %a, i32
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -416,9 +404,9 @@ define inreg <9 x i32> @bitcast_v9f32_to_v9i32_scalar(<9 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -431,8 +419,6 @@ define inreg <9 x i32> @bitcast_v9f32_to_v9i32_scalar(<9 x float> inreg %a, i32
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -453,10 +439,10 @@ define inreg <9 x i32> @bitcast_v9f32_to_v9i32_scalar(<9 x float> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
; GFX11-NEXT: v_add_f32_e64 v7, s19, 1.0
@@ -469,8 +455,6 @@ define inreg <9 x i32> @bitcast_v9f32_to_v9i32_scalar(<9 x float> inreg %a, i32
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -659,7 +643,7 @@ define inreg <18 x i16> @bitcast_v9i32_to_v18i16_scalar(<9 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s25, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s25, s23, 16
; SI-NEXT: s_lshr_b32 s26, s21, 16
@@ -728,23 +712,12 @@ define inreg <18 x i16> @bitcast_v9i32_to_v18i16_scalar(<9 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v7, s12
; SI-NEXT: v_mov_b32_e32 v8, s8
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v9i32_to_v18i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -768,14 +741,12 @@ define inreg <18 x i16> @bitcast_v9i32_to_v18i16_scalar(<9 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v9i32_to_v18i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -799,15 +770,13 @@ define inreg <18 x i16> @bitcast_v9i32_to_v18i16_scalar(<9 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v9i32_to_v18i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -829,8 +798,6 @@ define inreg <18 x i16> @bitcast_v9i32_to_v18i16_scalar(<9 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_mov_b32_e32 v8, s20
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1085,7 +1052,7 @@ define inreg <9 x i32> @bitcast_v18i16_to_v9i32_scalar(<18 x i16> inreg %a, i32
; SI-NEXT: s_lshr_b32 s42, s17, 16
; SI-NEXT: s_lshr_b32 s43, s16, 16
; SI-NEXT: s_cmp_lg_u32 s25, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s43, 16
@@ -1172,15 +1139,12 @@ define inreg <9 x i32> @bitcast_v18i16_to_v9i32_scalar(<18 x i16> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v7, s11
; SI-NEXT: v_mov_b32_e32 v8, s12
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v18i16_to_v9i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -1240,16 +1204,14 @@ define inreg <9 x i32> @bitcast_v18i16_to_v9i32_scalar(<18 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v18i16_to_v9i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_4
+; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
@@ -1262,8 +1224,6 @@ define inreg <9 x i32> @bitcast_v18i16_to_v9i32_scalar(<18 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB7_3:
-; GFX9-NEXT: s_branch .LBB7_2
-; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1284,10 +1244,10 @@ define inreg <9 x i32> @bitcast_v18i16_to_v9i32_scalar(<18 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
; GFX11-NEXT: .LBB7_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v7, s19, 3 op_sel_hi:[1,0]
@@ -1300,8 +1260,6 @@ define inreg <9 x i32> @bitcast_v18i16_to_v9i32_scalar(<18 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: s_branch .LBB7_2
-; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -1490,7 +1448,7 @@ define inreg <18 x half> @bitcast_v9i32_to_v18f16_scalar(<9 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s25, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s25, s23, 16
; SI-NEXT: s_lshr_b32 s26, s21, 16
@@ -1559,23 +1517,12 @@ define inreg <18 x half> @bitcast_v9i32_to_v18f16_scalar(<9 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v7, s12
; SI-NEXT: v_mov_b32_e32 v8, s8
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v9i32_to_v18f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -1599,14 +1546,12 @@ define inreg <18 x half> @bitcast_v9i32_to_v18f16_scalar(<9 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v9i32_to_v18f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -1630,15 +1575,13 @@ define inreg <18 x half> @bitcast_v9i32_to_v18f16_scalar(<9 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v9i32_to_v18f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -1660,8 +1603,6 @@ define inreg <18 x half> @bitcast_v9i32_to_v18f16_scalar(<9 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_mov_b32_e32 v8, s20
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1953,7 +1894,7 @@ define inreg <9 x i32> @bitcast_v18f16_to_v9i32_scalar(<18 x half> inreg %a, i32
; SI-NEXT: s_lshr_b32 s42, s17, 16
; SI-NEXT: s_lshr_b32 s43, s16, 16
; SI-NEXT: s_cmp_lg_u32 s25, 0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s43, 16
@@ -1982,7 +1923,7 @@ define inreg <9 x i32> @bitcast_v18f16_to_v9i32_scalar(<18 x half> inreg %a, i32
; SI-NEXT: s_and_b32 s12, s24, 0xffff
; SI-NEXT: s_lshl_b32 s14, s13, 16
; SI-NEXT: s_or_b32 s12, s12, s14
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s43
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -2058,9 +1999,6 @@ define inreg <9 x i32> @bitcast_v18f16_to_v9i32_scalar(<18 x half> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v8, v10, v8
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -2076,9 +2014,9 @@ define inreg <9 x i32> @bitcast_v18f16_to_v9i32_scalar(<18 x half> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s24, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -2128,8 +2066,6 @@ define inreg <9 x i32> @bitcast_v18f16_to_v9i32_scalar(<18 x half> inreg %a, i32
; VI-NEXT: v_or_b32_e32 v0, v0, v9
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2145,9 +2081,9 @@ define inreg <9 x i32> @bitcast_v18f16_to_v9i32_scalar(<18 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v8, s24, v0 op_sel_hi:[1,0]
@@ -2161,8 +2097,6 @@ define inreg <9 x i32> @bitcast_v18f16_to_v9i32_scalar(<18 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2183,10 +2117,10 @@ define inreg <9 x i32> @bitcast_v18f16_to_v9i32_scalar(<18 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s19 op_sel_hi:[0,1]
@@ -2199,8 +2133,6 @@ define inreg <9 x i32> @bitcast_v18f16_to_v9i32_scalar(<18 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -2384,7 +2316,7 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s25, 0
-; SI-NEXT: s_cbranch_scc0 .LBB13_3
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s25, s17, 16
; SI-NEXT: s_lshr_b32 s28, s23, 16
@@ -2395,7 +2327,7 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3
; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB13_4
+; SI-NEXT: s_cbranch_execnz .LBB13_3
; SI-NEXT: .LBB13_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
; SI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -2415,19 +2347,8 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3
; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; SI-NEXT: s_branch .LBB13_5
+; SI-NEXT: s_branch .LBB13_4
; SI-NEXT: .LBB13_3:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB13_2
-; SI-NEXT: .LBB13_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -2446,7 +2367,7 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v13, s10
; SI-NEXT: v_mov_b32_e32 v12, s6
; SI-NEXT: v_mov_b32_e32 v9, s4
-; SI-NEXT: .LBB13_5: ; %end
+; SI-NEXT: .LBB13_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v14
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v0, v11
@@ -2480,9 +2401,9 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_3
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB13_4
+; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -2495,8 +2416,6 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB13_3:
-; VI-NEXT: s_branch .LBB13_2
-; VI-NEXT: .LBB13_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2519,9 +2438,9 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB13_4
+; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -2534,8 +2453,6 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB13_3:
-; GFX9-NEXT: s_branch .LBB13_2
-; GFX9-NEXT: .LBB13_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2563,10 +2480,10 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
; GFX11-NEXT: .LBB13_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
; GFX11-NEXT: v_add_f32_e64 v7, s19, 1.0
@@ -2579,8 +2496,6 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB13_3:
-; GFX11-NEXT: s_branch .LBB13_2
-; GFX11-NEXT: .LBB13_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -2844,7 +2759,7 @@ define inreg <9 x float> @bitcast_v18i16_to_v9f32_scalar(<18 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s42, s17, 16
; SI-NEXT: s_lshr_b32 s43, s16, 16
; SI-NEXT: s_cmp_lg_u32 s25, 0
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s43, 16
@@ -2931,15 +2846,12 @@ define inreg <9 x float> @bitcast_v18i16_to_v9f32_scalar(<18 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v7, s11
; SI-NEXT: v_mov_b32_e32 v8, s12
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v18i16_to_v9f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB15_3
; VI-NEXT: .LBB15_2: ; %cmp.true
@@ -2999,16 +2911,14 @@ define inreg <9 x float> @bitcast_v18i16_to_v9f32_scalar(<18 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v18i16_to_v9f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
@@ -3021,8 +2931,6 @@ define inreg <9 x float> @bitcast_v18i16_to_v9f32_scalar(<18 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3043,10 +2951,10 @@ define inreg <9 x float> @bitcast_v18i16_to_v9f32_scalar(<18 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v7, s19, 3 op_sel_hi:[1,0]
@@ -3059,8 +2967,6 @@ define inreg <9 x float> @bitcast_v18i16_to_v9f32_scalar(<18 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -3244,7 +3150,7 @@ define inreg <18 x half> @bitcast_v9f32_to_v18f16_scalar(<9 x float> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s25, 0
-; SI-NEXT: s_cbranch_scc0 .LBB17_3
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s25, s17, 16
; SI-NEXT: s_lshr_b32 s28, s23, 16
@@ -3255,7 +3161,7 @@ define inreg <18 x half> @bitcast_v9f32_to_v18f16_scalar(<9 x float> inreg %a, i
; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB17_4
+; SI-NEXT: s_cbranch_execnz .LBB17_3
; SI-NEXT: .LBB17_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
; SI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -3275,19 +3181,8 @@ define inreg <18 x half> @bitcast_v9f32_to_v18f16_scalar(<9 x float> inreg %a, i
; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; SI-NEXT: s_branch .LBB17_5
+; SI-NEXT: s_branch .LBB17_4
; SI-NEXT: .LBB17_3:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB17_2
-; SI-NEXT: .LBB17_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -3306,7 +3201,7 @@ define inreg <18 x half> @bitcast_v9f32_to_v18f16_scalar(<9 x float> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v13, s10
; SI-NEXT: v_mov_b32_e32 v12, s6
; SI-NEXT: v_mov_b32_e32 v9, s4
-; SI-NEXT: .LBB17_5: ; %end
+; SI-NEXT: .LBB17_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v14
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v0, v11
@@ -3340,9 +3235,9 @@ define inreg <18 x half> @bitcast_v9f32_to_v18f16_scalar(<9 x float> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_3
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_4
+; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -3355,8 +3250,6 @@ define inreg <18 x half> @bitcast_v9f32_to_v18f16_scalar(<9 x float> inreg %a, i
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB17_3:
-; VI-NEXT: s_branch .LBB17_2
-; VI-NEXT: .LBB17_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -3379,9 +3272,9 @@ define inreg <18 x half> @bitcast_v9f32_to_v18f16_scalar(<9 x float> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_4
+; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -3394,8 +3287,6 @@ define inreg <18 x half> @bitcast_v9f32_to_v18f16_scalar(<9 x float> inreg %a, i
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB17_3:
-; GFX9-NEXT: s_branch .LBB17_2
-; GFX9-NEXT: .LBB17_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3423,10 +3314,10 @@ define inreg <18 x half> @bitcast_v9f32_to_v18f16_scalar(<9 x float> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
; GFX11-NEXT: .LBB17_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
; GFX11-NEXT: v_add_f32_e64 v7, s19, 1.0
@@ -3439,8 +3330,6 @@ define inreg <18 x half> @bitcast_v9f32_to_v18f16_scalar(<9 x float> inreg %a, i
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB17_3:
-; GFX11-NEXT: s_branch .LBB17_2
-; GFX11-NEXT: .LBB17_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -3741,7 +3630,7 @@ define inreg <9 x float> @bitcast_v18f16_to_v9f32_scalar(<18 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s42, s17, 16
; SI-NEXT: s_lshr_b32 s43, s16, 16
; SI-NEXT: s_cmp_lg_u32 s25, 0
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s43, 16
@@ -3770,7 +3659,7 @@ define inreg <9 x float> @bitcast_v18f16_to_v9f32_scalar(<18 x half> inreg %a, i
; SI-NEXT: s_and_b32 s12, s24, 0xffff
; SI-NEXT: s_lshl_b32 s14, s13, 16
; SI-NEXT: s_or_b32 s12, s12, s14
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s43
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -3846,9 +3735,6 @@ define inreg <9 x float> @bitcast_v18f16_to_v9f32_scalar(<18 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v8, v10, v8
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -3864,9 +3750,9 @@ define inreg <9 x float> @bitcast_v18f16_to_v9f32_scalar(<18 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
+; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s24, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -3916,8 +3802,6 @@ define inreg <9 x float> @bitcast_v18f16_to_v9f32_scalar(<18 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v0, v9
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
-; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -3933,9 +3817,9 @@ define inreg <9 x float> @bitcast_v18f16_to_v9f32_scalar(<18 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v8, s24, v0 op_sel_hi:[1,0]
@@ -3949,8 +3833,6 @@ define inreg <9 x float> @bitcast_v18f16_to_v9f32_scalar(<18 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3971,10 +3853,10 @@ define inreg <9 x float> @bitcast_v18f16_to_v9f32_scalar(<18 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s19 op_sel_hi:[0,1]
@@ -3987,8 +3869,6 @@ define inreg <9 x float> @bitcast_v18f16_to_v9f32_scalar(<18 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -4288,7 +4168,7 @@ define inreg <18 x half> @bitcast_v18i16_to_v18f16_scalar(<18 x i16> inreg %a, i
; SI-NEXT: s_lshr_b32 s43, s17, 16
; SI-NEXT: s_lshr_b32 s47, s16, 16
; SI-NEXT: s_cmp_lg_u32 s25, 0
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s5, s24, 0xffff
; SI-NEXT: s_lshl_b32 s7, s42, 16
@@ -4419,23 +4299,12 @@ define inreg <18 x half> @bitcast_v18i16_to_v18f16_scalar(<18 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v7, s5
; SI-NEXT: v_mov_b32_e32 v8, s12
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_v18i16_to_v18f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
@@ -4495,16 +4364,14 @@ define inreg <18 x half> @bitcast_v18i16_to_v18f16_scalar(<18 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v18i16_to_v18f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
+; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
@@ -4517,8 +4384,6 @@ define inreg <18 x half> @bitcast_v18i16_to_v18f16_scalar(<18 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
-; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -4546,10 +4411,10 @@ define inreg <18 x half> @bitcast_v18i16_to_v18f16_scalar(<18 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
; GFX11-NEXT: .LBB21_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v7, s19, 3 op_sel_hi:[1,0]
@@ -4562,8 +4427,6 @@ define inreg <18 x half> @bitcast_v18i16_to_v18f16_scalar(<18 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
-; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -4833,9 +4696,9 @@ define inreg <18 x i16> @bitcast_v18f16_to_v18i16_scalar(<18 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s6, s17, 16
; SI-NEXT: s_lshr_b32 s14, s16, 16
; SI-NEXT: s_cmp_lg_u32 s25, 0
-; SI-NEXT: s_cbranch_scc0 .LBB23_3
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_4
+; SI-NEXT: s_cbranch_execnz .LBB23_3
; SI-NEXT: .LBB23_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s14
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -4913,10 +4776,8 @@ define inreg <18 x i16> @bitcast_v18f16_to_v18i16_scalar(<18 x half> inreg %a, i
; SI-NEXT: v_lshr_b64 v[12:13], v[2:3], 16
; SI-NEXT: v_lshr_b64 v[10:11], v[4:5], 16
; SI-NEXT: v_lshr_b64 v[8:9], v[6:7], 16
-; SI-NEXT: s_branch .LBB23_5
+; SI-NEXT: s_branch .LBB23_4
; SI-NEXT: .LBB23_3:
-; SI-NEXT: s_branch .LBB23_2
-; SI-NEXT: .LBB23_4:
; SI-NEXT: v_mov_b32_e32 v17, s11
; SI-NEXT: v_mov_b32_e32 v21, s9
; SI-NEXT: v_mov_b32_e32 v22, s8
@@ -4935,7 +4796,7 @@ define inreg <18 x i16> @bitcast_v18f16_to_v18i16_scalar(<18 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v12, s13
; SI-NEXT: v_mov_b32_e32 v10, s12
; SI-NEXT: v_mov_b32_e32 v8, s10
-; SI-NEXT: .LBB23_5: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v14
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v16
; SI-NEXT: v_or_b32_e32 v0, v2, v0
@@ -4969,9 +4830,9 @@ define inreg <18 x i16> @bitcast_v18f16_to_v18i16_scalar(<18 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
+; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s5, s23, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -5021,8 +4882,6 @@ define inreg <18 x i16> @bitcast_v18f16_to_v18i16_scalar(<18 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v9, v10
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
-; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -5045,9 +4904,9 @@ define inreg <18 x i16> @bitcast_v18f16_to_v18i16_scalar(<18 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
+; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v8, s24, v0 op_sel_hi:[1,0]
@@ -5061,8 +4920,6 @@ define inreg <18 x i16> @bitcast_v18f16_to_v18i16_scalar(<18 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
-; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -5090,10 +4947,10 @@ define inreg <18 x i16> @bitcast_v18f16_to_v18i16_scalar(<18 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
; GFX11-NEXT: .LBB23_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s19 op_sel_hi:[0,1]
@@ -5106,8 +4963,6 @@ define inreg <18 x i16> @bitcast_v18f16_to_v18i16_scalar(<18 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
-; GFX11-NEXT: .LBB23_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
index 9ae6700ac1825..32836fb256b2c 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
@@ -121,7 +121,7 @@ define inreg <10 x float> @bitcast_v10i32_to_v10f32_scalar(<10 x i32> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -147,14 +147,12 @@ define inreg <10 x float> @bitcast_v10i32_to_v10f32_scalar(<10 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v10i32_to_v10f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -180,14 +178,12 @@ define inreg <10 x float> @bitcast_v10i32_to_v10f32_scalar(<10 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v10i32_to_v10f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -213,15 +209,13 @@ define inreg <10 x float> @bitcast_v10i32_to_v10f32_scalar(<10 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v10i32_to_v10f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -244,8 +238,6 @@ define inreg <10 x float> @bitcast_v10i32_to_v10f32_scalar(<10 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -372,9 +364,9 @@ define inreg <10 x i32> @bitcast_v10f32_to_v10i32_scalar(<10 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v9, s25, 1.0
; SI-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -388,8 +380,6 @@ define inreg <10 x i32> @bitcast_v10f32_to_v10i32_scalar(<10 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -406,9 +396,9 @@ define inreg <10 x i32> @bitcast_v10f32_to_v10i32_scalar(<10 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -422,8 +412,6 @@ define inreg <10 x i32> @bitcast_v10f32_to_v10i32_scalar(<10 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -440,9 +428,9 @@ define inreg <10 x i32> @bitcast_v10f32_to_v10i32_scalar(<10 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -456,8 +444,6 @@ define inreg <10 x i32> @bitcast_v10f32_to_v10i32_scalar(<10 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -479,10 +465,10 @@ define inreg <10 x i32> @bitcast_v10f32_to_v10i32_scalar(<10 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
@@ -496,8 +482,6 @@ define inreg <10 x i32> @bitcast_v10f32_to_v10i32_scalar(<10 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -699,7 +683,7 @@ define inreg <20 x i16> @bitcast_v10i32_to_v20i16_scalar(<10 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s26, s25, 16
; SI-NEXT: s_lshr_b32 s27, s23, 16
@@ -775,24 +759,12 @@ define inreg <20 x i16> @bitcast_v10i32_to_v20i16_scalar(<10 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v8, s4
; SI-NEXT: v_mov_b32_e32 v9, s13
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v10i32_to_v20i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -818,14 +790,12 @@ define inreg <20 x i16> @bitcast_v10i32_to_v20i16_scalar(<10 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v10i32_to_v20i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -851,15 +821,13 @@ define inreg <20 x i16> @bitcast_v10i32_to_v20i16_scalar(<10 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v10i32_to_v20i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -882,8 +850,6 @@ define inreg <20 x i16> @bitcast_v10i32_to_v20i16_scalar(<10 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1156,7 +1122,7 @@ define inreg <10 x i32> @bitcast_v20i16_to_v10i32_scalar(<20 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s45, s17, 16
; SI-NEXT: s_lshr_b32 s46, s16, 16
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s46, 16
@@ -1252,15 +1218,12 @@ define inreg <10 x i32> @bitcast_v20i16_to_v10i32_scalar(<20 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v8, s12
; SI-NEXT: v_mov_b32_e32 v9, s13
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v20i16_to_v10i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -1326,16 +1289,14 @@ define inreg <10 x i32> @bitcast_v20i16_to_v10i32_scalar(<20 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v20i16_to_v10i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_4
+; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
@@ -1349,8 +1310,6 @@ define inreg <10 x i32> @bitcast_v20i16_to_v10i32_scalar(<20 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB7_3:
-; GFX9-NEXT: s_branch .LBB7_2
-; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1372,10 +1331,10 @@ define inreg <10 x i32> @bitcast_v20i16_to_v10i32_scalar(<20 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
; GFX11-NEXT: .LBB7_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
@@ -1389,8 +1348,6 @@ define inreg <10 x i32> @bitcast_v20i16_to_v10i32_scalar(<20 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: s_branch .LBB7_2
-; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -1592,7 +1549,7 @@ define inreg <20 x half> @bitcast_v10i32_to_v20f16_scalar(<10 x i32> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s26, s25, 16
; SI-NEXT: s_lshr_b32 s27, s23, 16
@@ -1668,24 +1625,12 @@ define inreg <20 x half> @bitcast_v10i32_to_v20f16_scalar(<10 x i32> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v8, s4
; SI-NEXT: v_mov_b32_e32 v9, s13
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v10i32_to_v20f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -1711,14 +1656,12 @@ define inreg <20 x half> @bitcast_v10i32_to_v20f16_scalar(<10 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v10i32_to_v20f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -1744,15 +1687,13 @@ define inreg <20 x half> @bitcast_v10i32_to_v20f16_scalar(<10 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v10i32_to_v20f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -1775,8 +1716,6 @@ define inreg <20 x half> @bitcast_v10i32_to_v20f16_scalar(<10 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2089,7 +2028,7 @@ define inreg <10 x i32> @bitcast_v20f16_to_v10i32_scalar(<20 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s45, s17, 16
; SI-NEXT: s_lshr_b32 s46, s16, 16
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s46, 16
@@ -2121,7 +2060,7 @@ define inreg <10 x i32> @bitcast_v20f16_to_v10i32_scalar(<20 x half> inreg %a, i
; SI-NEXT: s_and_b32 s13, s25, 0xffff
; SI-NEXT: s_lshl_b32 s14, s27, 16
; SI-NEXT: s_or_b32 s13, s13, s14
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s46
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -2205,9 +2144,6 @@ define inreg <10 x i32> @bitcast_v20f16_to_v10i32_scalar(<20 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v9, v11, v9
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -2224,9 +2160,9 @@ define inreg <10 x i32> @bitcast_v20f16_to_v10i32_scalar(<20 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s25, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -2281,8 +2217,6 @@ define inreg <10 x i32> @bitcast_v20f16_to_v10i32_scalar(<20 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v0, v10
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2299,9 +2233,9 @@ define inreg <10 x i32> @bitcast_v20f16_to_v10i32_scalar(<20 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v9, s25, v0 op_sel_hi:[1,0]
@@ -2316,8 +2250,6 @@ define inreg <10 x i32> @bitcast_v20f16_to_v10i32_scalar(<20 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2339,10 +2271,10 @@ define inreg <10 x i32> @bitcast_v20f16_to_v10i32_scalar(<20 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
@@ -2356,8 +2288,6 @@ define inreg <10 x i32> @bitcast_v20f16_to_v10i32_scalar(<20 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -3256,7 +3186,7 @@ define inreg <40 x i8> @bitcast_v10i32_to_v40i8_scalar(<10 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB13_4
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s72, s25, 24
; SI-NEXT: s_lshr_b32 s73, s25, 16
@@ -3460,44 +3390,12 @@ define inreg <40 x i8> @bitcast_v10i32_to_v40i8_scalar(<10 x i32> inreg %a, i32
; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB13_4:
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: s_branch .LBB13_2
;
; VI-LABEL: bitcast_v10i32_to_v40i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s26, s25, 24
; VI-NEXT: s_lshr_b32 s27, s25, 16
@@ -3654,44 +3552,12 @@ define inreg <40 x i8> @bitcast_v10i32_to_v40i8_scalar(<10 x i32> inreg %a, i32
; VI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr29
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr28
-; VI-NEXT: ; implicit-def: $sgpr27
-; VI-NEXT: ; implicit-def: $sgpr26
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v10i32_to_v40i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s26, s25, 24
; GFX9-NEXT: s_lshr_b32 s27, s25, 16
@@ -3839,45 +3705,13 @@ define inreg <40 x i8> @bitcast_v10i32_to_v40i8_scalar(<10 x i32> inreg %a, i32
; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:36
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr29
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr28
-; GFX9-NEXT: ; implicit-def: $sgpr27
-; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v10i32_to_v40i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s63, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s14, s21, 24
; GFX11-NEXT: s_lshr_b32 s22, s21, 16
@@ -4000,38 +3834,6 @@ define inreg <40 x i8> @bitcast_v10i32_to_v40i8_scalar(<10 x i32> inreg %a, i32
; GFX11-NEXT: scratch_store_b128 v0, v[5:8], off offset:16
; GFX11-NEXT: scratch_store_b64 v0, v[9:10], off offset:32
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr29
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr25
-; GFX11-NEXT: ; implicit-def: $sgpr27
-; GFX11-NEXT: ; implicit-def: $sgpr26
-; GFX11-NEXT: ; implicit-def: $sgpr23
-; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr22
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5279,7 +5081,7 @@ define inreg <10 x i32> @bitcast_v40i8_to_v10i32_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s79, v2
; SI-NEXT: v_readfirstlane_b32 s88, v1
; SI-NEXT: v_readfirstlane_b32 s89, v0
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -5515,9 +5317,6 @@ define inreg <10 x i32> @bitcast_v40i8_to_v10i32_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v8, s12
; SI-NEXT: v_mov_b32_e32 v9, s13
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v40i8_to_v10i32_scalar:
; VI: ; %bb.0:
@@ -5550,7 +5349,7 @@ define inreg <10 x i32> @bitcast_v40i8_to_v10i32_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s62, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s63, v0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v9, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -5718,9 +5517,6 @@ define inreg <10 x i32> @bitcast_v40i8_to_v10i32_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v9
; VI-NEXT: .LBB15_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v40i8_to_v10i32_scalar:
; GFX9: ; %bb.0:
@@ -5753,7 +5549,7 @@ define inreg <10 x i32> @bitcast_v40i8_to_v10i32_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s62, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s63, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v9, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -5912,9 +5708,6 @@ define inreg <10 x i32> @bitcast_v40i8_to_v10i32_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: v_or_b32_sdwa v9, v10, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB15_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB15_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX9-NEXT: s_branch .LBB15_2
;
; GFX11-LABEL: bitcast_v40i8_to_v10i32_scalar:
; GFX11: ; %bb.0:
@@ -5944,7 +5737,7 @@ define inreg <10 x i32> @bitcast_v40i8_to_v10i32_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: v_readfirstlane_b32 s57, v0
; GFX11-NEXT: s_cmp_lg_u32 s58, 0
; GFX11-NEXT: s_mov_b32 s58, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v5, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -6084,9 +5877,6 @@ define inreg <10 x i32> @bitcast_v40i8_to_v10i32_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: v_or_b32_e32 v9, v14, v15
; GFX11-NEXT: .LBB15_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB15_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX11-NEXT: s_branch .LBB15_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6219,7 +6009,7 @@ define inreg <5 x double> @bitcast_v10i32_to_v5f64_scalar(<10 x i32> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB17_3
; SI-NEXT: .LBB17_2: ; %cmp.true
@@ -6245,14 +6035,12 @@ define inreg <5 x double> @bitcast_v10i32_to_v5f64_scalar(<10 x i32> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_v10i32_to_v5f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
@@ -6278,14 +6066,12 @@ define inreg <5 x double> @bitcast_v10i32_to_v5f64_scalar(<10 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v10i32_to_v5f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
@@ -6311,15 +6097,13 @@ define inreg <5 x double> @bitcast_v10i32_to_v5f64_scalar(<10 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v10i32_to_v5f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
@@ -6342,8 +6126,6 @@ define inreg <5 x double> @bitcast_v10i32_to_v5f64_scalar(<10 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6456,9 +6238,9 @@ define inreg <10 x i32> @bitcast_v5f64_to_v10i32_scalar(<5 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; SI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -6467,8 +6249,6 @@ define inreg <10 x i32> @bitcast_v5f64_to_v10i32_scalar(<5 x double> inreg %a, i
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_3:
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -6485,9 +6265,9 @@ define inreg <10 x i32> @bitcast_v5f64_to_v10i32_scalar(<5 x double> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
+; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -6496,8 +6276,6 @@ define inreg <10 x i32> @bitcast_v5f64_to_v10i32_scalar(<5 x double> inreg %a, i
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
-; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -6514,9 +6292,9 @@ define inreg <10 x i32> @bitcast_v5f64_to_v10i32_scalar(<5 x double> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -6525,8 +6303,6 @@ define inreg <10 x i32> @bitcast_v5f64_to_v10i32_scalar(<5 x double> inreg %a, i
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -6548,10 +6324,10 @@ define inreg <10 x i32> @bitcast_v5f64_to_v10i32_scalar(<5 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[6:7], s[18:19], 1.0
@@ -6560,8 +6336,6 @@ define inreg <10 x i32> @bitcast_v5f64_to_v10i32_scalar(<5 x double> inreg %a, i
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -6700,7 +6474,7 @@ define inreg <5 x i64> @bitcast_v10i32_to_v5i64_scalar(<10 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB21_3
; SI-NEXT: .LBB21_2: ; %cmp.true
@@ -6726,14 +6500,12 @@ define inreg <5 x i64> @bitcast_v10i32_to_v5i64_scalar(<10 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_v10i32_to_v5i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
@@ -6759,14 +6531,12 @@ define inreg <5 x i64> @bitcast_v10i32_to_v5i64_scalar(<10 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v10i32_to_v5i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
@@ -6792,15 +6562,13 @@ define inreg <5 x i64> @bitcast_v10i32_to_v5i64_scalar(<10 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_v10i32_to_v5i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
@@ -6823,8 +6591,6 @@ define inreg <5 x i64> @bitcast_v10i32_to_v5i64_scalar(<10 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_4:
-; GFX11-NEXT: s_branch .LBB21_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6960,7 +6726,7 @@ define inreg <10 x i32> @bitcast_v5i64_to_v10i32_scalar(<5 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB23_3
; SI-NEXT: .LBB23_2: ; %cmp.true
@@ -6986,14 +6752,12 @@ define inreg <10 x i32> @bitcast_v5i64_to_v10i32_scalar(<5 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v5i64_to_v10i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
@@ -7019,14 +6783,12 @@ define inreg <10 x i32> @bitcast_v5i64_to_v10i32_scalar(<5 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v5i64_to_v10i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
@@ -7052,15 +6814,13 @@ define inreg <10 x i32> @bitcast_v5i64_to_v10i32_scalar(<5 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v5i64_to_v10i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
@@ -7083,8 +6843,6 @@ define inreg <10 x i32> @bitcast_v5i64_to_v10i32_scalar(<5 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: s_branch .LBB23_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7274,7 +7032,7 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB25_3
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s40, s25, 16
; SI-NEXT: s_lshr_b32 s29, s23, 16
@@ -7286,7 +7044,7 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB25_4
+; SI-NEXT: s_cbranch_execnz .LBB25_3
; SI-NEXT: .LBB25_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v9, s25, 1.0
; SI-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -7308,20 +7066,8 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v1
-; SI-NEXT: s_branch .LBB25_5
+; SI-NEXT: s_branch .LBB25_4
; SI-NEXT: .LBB25_3:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: s_branch .LBB25_2
-; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -7342,7 +7088,7 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v12, s8
; SI-NEXT: v_mov_b32_e32 v11, s6
; SI-NEXT: v_mov_b32_e32 v10, s4
-; SI-NEXT: .LBB25_5: ; %end
+; SI-NEXT: .LBB25_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
@@ -7379,9 +7125,9 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
+; VI-NEXT: s_cbranch_execnz .LBB25_3
; VI-NEXT: .LBB25_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -7395,8 +7141,6 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
-; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -7419,9 +7163,9 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
+; GFX9-NEXT: s_cbranch_execnz .LBB25_3
; GFX9-NEXT: .LBB25_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -7435,8 +7179,6 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
-; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -7464,10 +7206,10 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_3
; GFX11-NEXT: .LBB25_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
@@ -7481,8 +7223,6 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
-; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -7764,7 +7504,7 @@ define inreg <10 x float> @bitcast_v20i16_to_v10f32_scalar(<20 x i16> inreg %a,
; SI-NEXT: s_lshr_b32 s45, s17, 16
; SI-NEXT: s_lshr_b32 s46, s16, 16
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB27_4
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s46, 16
@@ -7860,15 +7600,12 @@ define inreg <10 x float> @bitcast_v20i16_to_v10f32_scalar(<20 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v8, s12
; SI-NEXT: v_mov_b32_e32 v9, s13
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13
-; SI-NEXT: s_branch .LBB27_2
;
; VI-LABEL: bitcast_v20i16_to_v10f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB27_3
; VI-NEXT: .LBB27_2: ; %cmp.true
@@ -7934,16 +7671,14 @@ define inreg <10 x float> @bitcast_v20i16_to_v10f32_scalar(<20 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v20i16_to_v10f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_4
+; GFX9-NEXT: s_cbranch_execnz .LBB27_3
; GFX9-NEXT: .LBB27_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
@@ -7957,8 +7692,6 @@ define inreg <10 x float> @bitcast_v20i16_to_v10f32_scalar(<20 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB27_3:
-; GFX9-NEXT: s_branch .LBB27_2
-; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -7980,10 +7713,10 @@ define inreg <10 x float> @bitcast_v20i16_to_v10f32_scalar(<20 x i16> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_3
; GFX11-NEXT: .LBB27_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
@@ -7997,8 +7730,6 @@ define inreg <10 x float> @bitcast_v20i16_to_v10f32_scalar(<20 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
-; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -8194,7 +7925,7 @@ define inreg <20 x half> @bitcast_v10f32_to_v20f16_scalar(<10 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s40, s25, 16
; SI-NEXT: s_lshr_b32 s29, s23, 16
@@ -8206,7 +7937,7 @@ define inreg <20 x half> @bitcast_v10f32_to_v20f16_scalar(<10 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB29_4
+; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v9, s25, 1.0
; SI-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -8228,20 +7959,8 @@ define inreg <20 x half> @bitcast_v10f32_to_v20f16_scalar(<10 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v1
-; SI-NEXT: s_branch .LBB29_5
+; SI-NEXT: s_branch .LBB29_4
; SI-NEXT: .LBB29_3:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: s_branch .LBB29_2
-; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -8262,7 +7981,7 @@ define inreg <20 x half> @bitcast_v10f32_to_v20f16_scalar(<10 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v12, s8
; SI-NEXT: v_mov_b32_e32 v11, s6
; SI-NEXT: v_mov_b32_e32 v10, s4
-; SI-NEXT: .LBB29_5: ; %end
+; SI-NEXT: .LBB29_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
@@ -8299,9 +8018,9 @@ define inreg <20 x half> @bitcast_v10f32_to_v20f16_scalar(<10 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
+; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -8315,8 +8034,6 @@ define inreg <20 x half> @bitcast_v10f32_to_v20f16_scalar(<10 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
-; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -8339,9 +8056,9 @@ define inreg <20 x half> @bitcast_v10f32_to_v20f16_scalar(<10 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
+; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -8355,8 +8072,6 @@ define inreg <20 x half> @bitcast_v10f32_to_v20f16_scalar(<10 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
-; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -8384,10 +8099,10 @@ define inreg <20 x half> @bitcast_v10f32_to_v20f16_scalar(<10 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-NEXT: .LBB29_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
@@ -8401,8 +8116,6 @@ define inreg <20 x half> @bitcast_v10f32_to_v20f16_scalar(<10 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
-; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -8724,7 +8437,7 @@ define inreg <10 x float> @bitcast_v20f16_to_v10f32_scalar(<20 x half> inreg %a,
; SI-NEXT: s_lshr_b32 s45, s17, 16
; SI-NEXT: s_lshr_b32 s46, s16, 16
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB31_3
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s46, 16
@@ -8756,7 +8469,7 @@ define inreg <10 x float> @bitcast_v20f16_to_v10f32_scalar(<20 x half> inreg %a,
; SI-NEXT: s_and_b32 s13, s25, 0xffff
; SI-NEXT: s_lshl_b32 s14, s27, 16
; SI-NEXT: s_or_b32 s13, s13, s14
-; SI-NEXT: s_cbranch_execnz .LBB31_4
+; SI-NEXT: s_cbranch_execnz .LBB31_3
; SI-NEXT: .LBB31_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s46
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -8840,9 +8553,6 @@ define inreg <10 x float> @bitcast_v20f16_to_v10f32_scalar(<20 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v9, v11, v9
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB31_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13
-; SI-NEXT: s_branch .LBB31_2
-; SI-NEXT: .LBB31_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -8859,9 +8569,9 @@ define inreg <10 x float> @bitcast_v20f16_to_v10f32_scalar(<20 x half> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_3
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB31_4
+; VI-NEXT: s_cbranch_execnz .LBB31_3
; VI-NEXT: .LBB31_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s25, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -8916,8 +8626,6 @@ define inreg <10 x float> @bitcast_v20f16_to_v10f32_scalar(<20 x half> inreg %a,
; VI-NEXT: v_or_b32_e32 v0, v0, v10
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB31_3:
-; VI-NEXT: s_branch .LBB31_2
-; VI-NEXT: .LBB31_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -8934,9 +8642,9 @@ define inreg <10 x float> @bitcast_v20f16_to_v10f32_scalar(<20 x half> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
+; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v9, s25, v0 op_sel_hi:[1,0]
@@ -8951,8 +8659,6 @@ define inreg <10 x float> @bitcast_v20f16_to_v10f32_scalar(<20 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
-; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -8974,10 +8680,10 @@ define inreg <10 x float> @bitcast_v20f16_to_v10f32_scalar(<20 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-NEXT: .LBB31_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
@@ -8991,8 +8697,6 @@ define inreg <10 x float> @bitcast_v20f16_to_v10f32_scalar(<20 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
-; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -9883,7 +9587,7 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s91, s25, 24
; SI-NEXT: s_lshr_b32 s93, s25, 16
@@ -9915,7 +9619,7 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 24
; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 16
; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 8
-; SI-NEXT: s_cbranch_execnz .LBB33_4
+; SI-NEXT: s_cbranch_execnz .LBB33_3
; SI-NEXT: .LBB33_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
; SI-NEXT: v_add_f32_e64 v6, s20, 1.0
@@ -9967,40 +9671,8 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v12
; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v12
; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v12
-; SI-NEXT: s_branch .LBB33_5
+; SI-NEXT: s_branch .LBB33_4
; SI-NEXT: .LBB33_3:
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: s_branch .LBB33_2
-; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v13, s16
; SI-NEXT: v_mov_b32_e32 v12, s17
; SI-NEXT: v_mov_b32_e32 v10, s18
@@ -10026,7 +9698,7 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v11, s94
; SI-NEXT: v_mov_b32_e32 v9, s93
; SI-NEXT: v_mov_b32_e32 v8, s91
-; SI-NEXT: .LBB33_5: ; %end
+; SI-NEXT: .LBB33_4: ; %end
; SI-NEXT: v_and_b32_e32 v13, 0xff, v13
; SI-NEXT: s_lshl_b32 s5, s60, 8
; SI-NEXT: v_or_b32_e32 v13, s5, v13
@@ -10152,7 +9824,7 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s26, s25, 24
; VI-NEXT: s_lshr_b32 s27, s25, 16
@@ -10184,7 +9856,7 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; VI-NEXT: s_lshr_b64 s[8:9], s[20:21], 24
; VI-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB33_4
+; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v2, s25, 1.0
; VI-NEXT: v_add_f32_e64 v1, s24, 1.0
@@ -10226,40 +9898,8 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; VI-NEXT: v_lshrrev_b32_e32 v38, 8, v10
; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v9
; VI-NEXT: v_lshrrev_b32_e32 v39, 8, v9
-; VI-NEXT: s_branch .LBB33_5
+; VI-NEXT: s_branch .LBB33_4
; VI-NEXT: .LBB33_3:
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr28
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr29
-; VI-NEXT: ; implicit-def: $sgpr27
-; VI-NEXT: ; implicit-def: $sgpr26
-; VI-NEXT: s_branch .LBB33_2
-; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v9, s16
; VI-NEXT: v_mov_b32_e32 v10, s17
; VI-NEXT: v_mov_b32_e32 v7, s18
@@ -10300,7 +9940,7 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v13, s8
; VI-NEXT: v_mov_b32_e32 v12, s10
; VI-NEXT: v_mov_b32_e32 v11, s12
-; VI-NEXT: .LBB33_5: ; %end
+; VI-NEXT: .LBB33_4: ; %end
; VI-NEXT: s_mov_b32 s4, 0xc0c0004
; VI-NEXT: v_perm_b32 v15, v48, v15, s4
; VI-NEXT: v_perm_b32 v9, v9, v39, s4
@@ -10368,7 +10008,7 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s26, s25, 24
; GFX9-NEXT: s_lshr_b32 s27, s25, 16
@@ -10400,7 +10040,7 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; GFX9-NEXT: s_lshr_b64 s[8:9], s[20:21], 24
; GFX9-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
+; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v2, s25, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s24, 1.0
@@ -10442,40 +10082,8 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 8, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 8, v9
-; GFX9-NEXT: s_branch .LBB33_5
+; GFX9-NEXT: s_branch .LBB33_4
; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr28
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr29
-; GFX9-NEXT: ; implicit-def: $sgpr27
-; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: s_branch .LBB33_2
-; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v9, s16
; GFX9-NEXT: v_mov_b32_e32 v10, s17
; GFX9-NEXT: v_mov_b32_e32 v7, s18
@@ -10516,7 +10124,7 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v13, s8
; GFX9-NEXT: v_mov_b32_e32 v12, s10
; GFX9-NEXT: v_mov_b32_e32 v11, s12
-; GFX9-NEXT: .LBB33_5: ; %end
+; GFX9-NEXT: .LBB33_4: ; %end
; GFX9-NEXT: s_mov_b32 s4, 0xc0c0004
; GFX9-NEXT: v_perm_b32 v15, v48, v15, s4
; GFX9-NEXT: v_perm_b32 v9, v9, v39, s4
@@ -10576,7 +10184,7 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s14, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s15, s21, 24
; GFX11-NEXT: s_lshr_b32 s22, s21, 16
@@ -10609,7 +10217,7 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v2, s21, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s20, 1.0
@@ -10651,40 +10259,8 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; GFX11-NEXT: v_lshrrev_b32_e32 v38, 8, v10
; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
; GFX11-NEXT: v_lshrrev_b32_e32 v39, 8, v9
-; GFX11-NEXT: s_branch .LBB33_5
+; GFX11-NEXT: s_branch .LBB33_4
; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr29
-; GFX11-NEXT: ; implicit-def: $sgpr27
-; GFX11-NEXT: ; implicit-def: $sgpr26
-; GFX11-NEXT: ; implicit-def: $sgpr25
-; GFX11-NEXT: ; implicit-def: $sgpr23
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr22
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: s_branch .LBB33_2
-; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v9, s0 :: v_dual_mov_b32 v10, s1
; GFX11-NEXT: v_dual_mov_b32 v7, s2 :: v_dual_mov_b32 v8, s3
; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v6, s17
@@ -10705,7 +10281,7 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v15, s4
; GFX11-NEXT: v_dual_mov_b32 v14, s6 :: v_dual_mov_b32 v13, s8
; GFX11-NEXT: v_dual_mov_b32 v12, s10 :: v_dual_mov_b32 v11, s12
-; GFX11-NEXT: .LBB33_5: ; %end
+; GFX11-NEXT: .LBB33_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_perm_b32 v15, v48, v15, 0xc0c0004
; GFX11-NEXT: v_perm_b32 v9, v9, v39, 0xc0c0004
@@ -11999,7 +11575,7 @@ define inreg <10 x float> @bitcast_v40i8_to_v10f32_scalar(<40 x i8> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s79, v2
; SI-NEXT: v_readfirstlane_b32 s88, v1
; SI-NEXT: v_readfirstlane_b32 s89, v0
-; SI-NEXT: s_cbranch_scc0 .LBB35_4
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -12235,9 +11811,6 @@ define inreg <10 x float> @bitcast_v40i8_to_v10f32_scalar(<40 x i8> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v8, s12
; SI-NEXT: v_mov_b32_e32 v9, s13
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB35_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13
-; SI-NEXT: s_branch .LBB35_2
;
; VI-LABEL: bitcast_v40i8_to_v10f32_scalar:
; VI: ; %bb.0:
@@ -12270,7 +11843,7 @@ define inreg <10 x float> @bitcast_v40i8_to_v10f32_scalar(<40 x i8> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s62, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s63, v0
-; VI-NEXT: s_cbranch_scc0 .LBB35_4
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v9, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -12438,9 +12011,6 @@ define inreg <10 x float> @bitcast_v40i8_to_v10f32_scalar(<40 x i8> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v9
; VI-NEXT: .LBB35_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB35_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; VI-NEXT: s_branch .LBB35_2
;
; GFX9-LABEL: bitcast_v40i8_to_v10f32_scalar:
; GFX9: ; %bb.0:
@@ -12473,7 +12043,7 @@ define inreg <10 x float> @bitcast_v40i8_to_v10f32_scalar(<40 x i8> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s62, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s63, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v9, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -12632,9 +12202,6 @@ define inreg <10 x float> @bitcast_v40i8_to_v10f32_scalar(<40 x i8> inreg %a, i3
; GFX9-NEXT: v_or_b32_sdwa v9, v10, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB35_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB35_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX9-NEXT: s_branch .LBB35_2
;
; GFX11-LABEL: bitcast_v40i8_to_v10f32_scalar:
; GFX11: ; %bb.0:
@@ -12664,7 +12231,7 @@ define inreg <10 x float> @bitcast_v40i8_to_v10f32_scalar(<40 x i8> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s57, v0
; GFX11-NEXT: s_cmp_lg_u32 s58, 0
; GFX11-NEXT: s_mov_b32 s58, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v5, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -12804,9 +12371,6 @@ define inreg <10 x float> @bitcast_v40i8_to_v10f32_scalar(<40 x i8> inreg %a, i3
; GFX11-NEXT: v_or_b32_e32 v9, v14, v15
; GFX11-NEXT: .LBB35_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB35_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX11-NEXT: s_branch .LBB35_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12933,9 +12497,9 @@ define inreg <5 x double> @bitcast_v10f32_to_v5f64_scalar(<10 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB37_3
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB37_4
+; SI-NEXT: s_cbranch_execnz .LBB37_3
; SI-NEXT: .LBB37_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v9, s25, 1.0
; SI-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -12949,8 +12513,6 @@ define inreg <5 x double> @bitcast_v10f32_to_v5f64_scalar(<10 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB37_3:
-; SI-NEXT: s_branch .LBB37_2
-; SI-NEXT: .LBB37_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -12973,9 +12535,9 @@ define inreg <5 x double> @bitcast_v10f32_to_v5f64_scalar(<10 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB37_3
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_4
+; VI-NEXT: s_cbranch_execnz .LBB37_3
; VI-NEXT: .LBB37_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -12989,8 +12551,6 @@ define inreg <5 x double> @bitcast_v10f32_to_v5f64_scalar(<10 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB37_3:
-; VI-NEXT: s_branch .LBB37_2
-; VI-NEXT: .LBB37_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -13013,9 +12573,9 @@ define inreg <5 x double> @bitcast_v10f32_to_v5f64_scalar(<10 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_4
+; GFX9-NEXT: s_cbranch_execnz .LBB37_3
; GFX9-NEXT: .LBB37_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -13029,8 +12589,6 @@ define inreg <5 x double> @bitcast_v10f32_to_v5f64_scalar(<10 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB37_3:
-; GFX9-NEXT: s_branch .LBB37_2
-; GFX9-NEXT: .LBB37_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -13058,10 +12616,10 @@ define inreg <5 x double> @bitcast_v10f32_to_v5f64_scalar(<10 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
; GFX11-NEXT: .LBB37_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
@@ -13075,8 +12633,6 @@ define inreg <5 x double> @bitcast_v10f32_to_v5f64_scalar(<10 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: s_branch .LBB37_2
-; GFX11-NEXT: .LBB37_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -13198,9 +12754,9 @@ define inreg <10 x float> @bitcast_v5f64_to_v10f32_scalar(<5 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB39_3
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_4
+; SI-NEXT: s_cbranch_execnz .LBB39_3
; SI-NEXT: .LBB39_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; SI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -13209,8 +12765,6 @@ define inreg <10 x float> @bitcast_v5f64_to_v10f32_scalar(<5 x double> inreg %a,
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB39_3:
-; SI-NEXT: s_branch .LBB39_2
-; SI-NEXT: .LBB39_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -13227,9 +12781,9 @@ define inreg <10 x float> @bitcast_v5f64_to_v10f32_scalar(<5 x double> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB39_3
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_4
+; VI-NEXT: s_cbranch_execnz .LBB39_3
; VI-NEXT: .LBB39_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -13238,8 +12792,6 @@ define inreg <10 x float> @bitcast_v5f64_to_v10f32_scalar(<5 x double> inreg %a,
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB39_3:
-; VI-NEXT: s_branch .LBB39_2
-; VI-NEXT: .LBB39_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -13256,9 +12808,9 @@ define inreg <10 x float> @bitcast_v5f64_to_v10f32_scalar(<5 x double> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
+; GFX9-NEXT: s_cbranch_execnz .LBB39_3
; GFX9-NEXT: .LBB39_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -13267,8 +12819,6 @@ define inreg <10 x float> @bitcast_v5f64_to_v10f32_scalar(<5 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
-; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -13290,10 +12840,10 @@ define inreg <10 x float> @bitcast_v5f64_to_v10f32_scalar(<5 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_3
; GFX11-NEXT: .LBB39_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[6:7], s[18:19], 1.0
@@ -13302,8 +12852,6 @@ define inreg <10 x float> @bitcast_v5f64_to_v10f32_scalar(<5 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
-; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -13436,9 +12984,9 @@ define inreg <5 x i64> @bitcast_v10f32_to_v5i64_scalar(<10 x float> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB41_3
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB41_4
+; SI-NEXT: s_cbranch_execnz .LBB41_3
; SI-NEXT: .LBB41_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v9, s25, 1.0
; SI-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -13452,8 +13000,6 @@ define inreg <5 x i64> @bitcast_v10f32_to_v5i64_scalar(<10 x float> inreg %a, i3
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB41_3:
-; SI-NEXT: s_branch .LBB41_2
-; SI-NEXT: .LBB41_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -13476,9 +13022,9 @@ define inreg <5 x i64> @bitcast_v10f32_to_v5i64_scalar(<10 x float> inreg %a, i3
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB41_3
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB41_4
+; VI-NEXT: s_cbranch_execnz .LBB41_3
; VI-NEXT: .LBB41_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -13492,8 +13038,6 @@ define inreg <5 x i64> @bitcast_v10f32_to_v5i64_scalar(<10 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB41_3:
-; VI-NEXT: s_branch .LBB41_2
-; VI-NEXT: .LBB41_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -13516,9 +13060,9 @@ define inreg <5 x i64> @bitcast_v10f32_to_v5i64_scalar(<10 x float> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB41_4
+; GFX9-NEXT: s_cbranch_execnz .LBB41_3
; GFX9-NEXT: .LBB41_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -13532,8 +13076,6 @@ define inreg <5 x i64> @bitcast_v10f32_to_v5i64_scalar(<10 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB41_3:
-; GFX9-NEXT: s_branch .LBB41_2
-; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -13561,10 +13103,10 @@ define inreg <5 x i64> @bitcast_v10f32_to_v5i64_scalar(<10 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
; GFX11-NEXT: .LBB41_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
@@ -13578,8 +13120,6 @@ define inreg <5 x i64> @bitcast_v10f32_to_v5i64_scalar(<10 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB41_3:
-; GFX11-NEXT: s_branch .LBB41_2
-; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -13724,7 +13264,7 @@ define inreg <10 x float> @bitcast_v5i64_to_v10f32_scalar(<5 x i64> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB43_4
+; SI-NEXT: s_cbranch_scc0 .LBB43_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB43_3
; SI-NEXT: .LBB43_2: ; %cmp.true
@@ -13750,14 +13290,12 @@ define inreg <10 x float> @bitcast_v5i64_to_v10f32_scalar(<5 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB43_4:
-; SI-NEXT: s_branch .LBB43_2
;
; VI-LABEL: bitcast_v5i64_to_v10f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB43_4
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB43_3
; VI-NEXT: .LBB43_2: ; %cmp.true
@@ -13783,14 +13321,12 @@ define inreg <10 x float> @bitcast_v5i64_to_v10f32_scalar(<5 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_4:
-; VI-NEXT: s_branch .LBB43_2
;
; GFX9-LABEL: bitcast_v5i64_to_v10f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
@@ -13816,15 +13352,13 @@ define inreg <10 x float> @bitcast_v5i64_to_v10f32_scalar(<5 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB43_4:
-; GFX9-NEXT: s_branch .LBB43_2
;
; GFX11-LABEL: bitcast_v5i64_to_v10f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
@@ -13847,8 +13381,6 @@ define inreg <10 x float> @bitcast_v5i64_to_v10f32_scalar(<5 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB43_4:
-; GFX11-NEXT: s_branch .LBB43_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14166,7 +13698,7 @@ define inreg <20 x half> @bitcast_v20i16_to_v20f16_scalar(<20 x i16> inreg %a, i
; SI-NEXT: s_lshr_b32 s46, s17, 16
; SI-NEXT: s_lshr_b32 s59, s16, 16
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB45_4
+; SI-NEXT: s_cbranch_scc0 .LBB45_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s5, s17, 0xffff
; SI-NEXT: s_lshl_b32 s7, s46, 16
@@ -14312,24 +13844,12 @@ define inreg <20 x half> @bitcast_v20i16_to_v20f16_scalar(<20 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v8, s4
; SI-NEXT: v_mov_b32_e32 v9, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB45_4:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: s_branch .LBB45_2
;
; VI-LABEL: bitcast_v20i16_to_v20f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB45_4
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB45_3
; VI-NEXT: .LBB45_2: ; %cmp.true
@@ -14395,16 +13915,14 @@ define inreg <20 x half> @bitcast_v20i16_to_v20f16_scalar(<20 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_4:
-; VI-NEXT: s_branch .LBB45_2
;
; GFX9-LABEL: bitcast_v20i16_to_v20f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB45_4
+; GFX9-NEXT: s_cbranch_execnz .LBB45_3
; GFX9-NEXT: .LBB45_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
@@ -14418,8 +13936,6 @@ define inreg <20 x half> @bitcast_v20i16_to_v20f16_scalar(<20 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB45_3:
-; GFX9-NEXT: s_branch .LBB45_2
-; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -14447,10 +13963,10 @@ define inreg <20 x half> @bitcast_v20i16_to_v20f16_scalar(<20 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
; GFX11-NEXT: .LBB45_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
@@ -14464,8 +13980,6 @@ define inreg <20 x half> @bitcast_v20i16_to_v20f16_scalar(<20 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB45_3:
-; GFX11-NEXT: s_branch .LBB45_2
-; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -14754,9 +14268,9 @@ define inreg <20 x i16> @bitcast_v20f16_to_v20i16_scalar(<20 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s6, s17, 16
; SI-NEXT: s_lshr_b32 s15, s16, 16
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB47_3
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB47_4
+; SI-NEXT: s_cbranch_execnz .LBB47_3
; SI-NEXT: .LBB47_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s15
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -14843,10 +14357,8 @@ define inreg <20 x i16> @bitcast_v20f16_to_v20i16_scalar(<20 x half> inreg %a, i
; SI-NEXT: v_lshr_b64 v[14:15], v[4:5], 16
; SI-NEXT: v_lshr_b64 v[12:13], v[6:7], 16
; SI-NEXT: v_lshr_b64 v[10:11], v[8:9], 16
-; SI-NEXT: s_branch .LBB47_5
+; SI-NEXT: s_branch .LBB47_4
; SI-NEXT: .LBB47_3:
-; SI-NEXT: s_branch .LBB47_2
-; SI-NEXT: .LBB47_4:
; SI-NEXT: v_mov_b32_e32 v21, s11
; SI-NEXT: v_mov_b32_e32 v22, s9
; SI-NEXT: v_mov_b32_e32 v23, s8
@@ -14867,7 +14379,7 @@ define inreg <20 x i16> @bitcast_v20f16_to_v20i16_scalar(<20 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v14, s13
; SI-NEXT: v_mov_b32_e32 v12, s12
; SI-NEXT: v_mov_b32_e32 v10, s10
-; SI-NEXT: .LBB47_5: ; %end
+; SI-NEXT: .LBB47_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v18
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v20
; SI-NEXT: v_or_b32_e32 v0, v2, v0
@@ -14904,9 +14416,9 @@ define inreg <20 x i16> @bitcast_v20f16_to_v20i16_scalar(<20 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
+; VI-NEXT: s_cbranch_execnz .LBB47_3
; VI-NEXT: .LBB47_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s5, s24, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -14961,8 +14473,6 @@ define inreg <20 x i16> @bitcast_v20f16_to_v20i16_scalar(<20 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v10, v11
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
-; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -14985,9 +14495,9 @@ define inreg <20 x i16> @bitcast_v20f16_to_v20i16_scalar(<20 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
+; GFX9-NEXT: s_cbranch_execnz .LBB47_3
; GFX9-NEXT: .LBB47_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v9, s25, v0 op_sel_hi:[1,0]
@@ -15002,8 +14512,6 @@ define inreg <20 x i16> @bitcast_v20f16_to_v20i16_scalar(<20 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
-; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -15031,10 +14539,10 @@ define inreg <20 x i16> @bitcast_v20f16_to_v20i16_scalar(<20 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-NEXT: .LBB47_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
@@ -15048,8 +14556,6 @@ define inreg <20 x i16> @bitcast_v20f16_to_v20i16_scalar(<20 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -16134,7 +15640,7 @@ define inreg <40 x i8> @bitcast_v20i16_to_v40i8_scalar(<20 x i16> inreg %a, i32
; SI-NEXT: s_lshr_b32 s94, s17, 16
; SI-NEXT: s_lshr_b32 s95, s16, 16
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB49_4
+; SI-NEXT: s_cbranch_scc0 .LBB49_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s95, 16
@@ -16410,44 +15916,12 @@ define inreg <40 x i8> @bitcast_v20i16_to_v40i8_scalar(<20 x i16> inreg %a, i32
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB49_4:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: s_branch .LBB49_2
;
; VI-LABEL: bitcast_v20i16_to_v40i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB49_4
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s26, s25, 24
; VI-NEXT: s_lshr_b32 s27, s25, 16
@@ -16644,44 +16118,12 @@ define inreg <40 x i8> @bitcast_v20i16_to_v40i8_scalar(<20 x i16> inreg %a, i32
; VI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB49_4:
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr29
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr28
-; VI-NEXT: ; implicit-def: $sgpr27
-; VI-NEXT: ; implicit-def: $sgpr26
-; VI-NEXT: s_branch .LBB49_2
;
; GFX9-LABEL: bitcast_v20i16_to_v40i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s26, s25, 24
; GFX9-NEXT: s_lshr_b32 s27, s25, 16
@@ -16713,7 +16155,7 @@ define inreg <40 x i8> @bitcast_v20i16_to_v40i8_scalar(<20 x i16> inreg %a, i32
; GFX9-NEXT: s_lshr_b64 s[8:9], s[20:21], 24
; GFX9-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
+; GFX9-NEXT: s_cbranch_execnz .LBB49_3
; GFX9-NEXT: .LBB49_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v2, s25, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s24, 3 op_sel_hi:[1,0]
@@ -16755,40 +16197,8 @@ define inreg <40 x i8> @bitcast_v20i16_to_v40i8_scalar(<20 x i16> inreg %a, i32
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 8, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 8, v9
-; GFX9-NEXT: s_branch .LBB49_5
+; GFX9-NEXT: s_branch .LBB49_4
; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr28
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr29
-; GFX9-NEXT: ; implicit-def: $sgpr27
-; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: s_branch .LBB49_2
-; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v9, s16
; GFX9-NEXT: v_mov_b32_e32 v10, s17
; GFX9-NEXT: v_mov_b32_e32 v7, s18
@@ -16829,7 +16239,7 @@ define inreg <40 x i8> @bitcast_v20i16_to_v40i8_scalar(<20 x i16> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v13, s8
; GFX9-NEXT: v_mov_b32_e32 v14, s6
; GFX9-NEXT: v_mov_b32_e32 v15, s4
-; GFX9-NEXT: .LBB49_5: ; %end
+; GFX9-NEXT: .LBB49_4: ; %end
; GFX9-NEXT: s_mov_b32 s4, 0xc0c0004
; GFX9-NEXT: v_perm_b32 v15, v48, v15, s4
; GFX9-NEXT: v_perm_b32 v9, v9, v39, s4
@@ -16889,7 +16299,7 @@ define inreg <40 x i8> @bitcast_v20i16_to_v40i8_scalar(<20 x i16> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s14, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s15, s21, 24
; GFX11-NEXT: s_lshr_b32 s22, s21, 16
@@ -16922,7 +16332,7 @@ define inreg <40 x i8> @bitcast_v20i16_to_v40i8_scalar(<20 x i16> inreg %a, i32
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v2, s21, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s20, 3 op_sel_hi:[1,0]
@@ -16964,40 +16374,8 @@ define inreg <40 x i8> @bitcast_v20i16_to_v40i8_scalar(<20 x i16> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v38, 8, v10
; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
; GFX11-NEXT: v_lshrrev_b32_e32 v39, 8, v9
-; GFX11-NEXT: s_branch .LBB49_5
+; GFX11-NEXT: s_branch .LBB49_4
; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr29
-; GFX11-NEXT: ; implicit-def: $sgpr27
-; GFX11-NEXT: ; implicit-def: $sgpr26
-; GFX11-NEXT: ; implicit-def: $sgpr25
-; GFX11-NEXT: ; implicit-def: $sgpr23
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr22
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: s_branch .LBB49_2
-; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_dual_mov_b32 v9, s0 :: v_dual_mov_b32 v10, s1
; GFX11-NEXT: v_dual_mov_b32 v7, s2 :: v_dual_mov_b32 v8, s3
; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v6, s17
@@ -17018,7 +16396,7 @@ define inreg <40 x i8> @bitcast_v20i16_to_v40i8_scalar(<20 x i16> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v11, s12
; GFX11-NEXT: v_dual_mov_b32 v12, s10 :: v_dual_mov_b32 v13, s8
; GFX11-NEXT: v_dual_mov_b32 v14, s6 :: v_dual_mov_b32 v15, s4
-; GFX11-NEXT: .LBB49_5: ; %end
+; GFX11-NEXT: .LBB49_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_perm_b32 v15, v48, v15, 0xc0c0004
; GFX11-NEXT: v_perm_b32 v9, v9, v39, 0xc0c0004
@@ -18462,7 +17840,7 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s59, v0
; SI-NEXT: v_writelane_b32 v27, s39, 7
-; SI-NEXT: s_cbranch_scc0 .LBB51_4
+; SI-NEXT: s_cbranch_scc0 .LBB51_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -18765,23 +18143,6 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB51_4:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: s_branch .LBB51_2
;
; VI-LABEL: bitcast_v40i8_to_v20i16_scalar:
; VI: ; %bb.0:
@@ -18814,7 +18175,7 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s10, v0
-; VI-NEXT: s_cbranch_scc0 .LBB51_4
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v9, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -18982,9 +18343,6 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v9
; VI-NEXT: .LBB51_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB51_2
;
; GFX9-LABEL: bitcast_v40i8_to_v20i16_scalar:
; GFX9: ; %bb.0:
@@ -19017,7 +18375,7 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s41, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s42, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v0, s17
; GFX9-NEXT: v_mov_b32_e32 v8, 0xc0c0004
@@ -19188,9 +18546,6 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: v_lshl_or_b32 v9, v15, 16, v9
; GFX9-NEXT: .LBB51_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB51_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB51_2
;
; GFX11-TRUE16-LABEL: bitcast_v40i8_to_v20i16_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -19220,7 +18575,7 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s45, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s58, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s58, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v8, 0xc0c0004
; GFX11-TRUE16-NEXT: s_and_b32 s59, s43, 0xff
@@ -19337,9 +18692,6 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v10.l
; GFX11-TRUE16-NEXT: .LBB51_3: ; %end
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB51_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB51_2
;
; GFX11-FAKE16-LABEL: bitcast_v40i8_to_v20i16_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -19369,7 +18721,7 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s58, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s58, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB51_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v8, 0xc0c0004
; GFX11-FAKE16-NEXT: s_and_b32 s59, s56, 0xff
@@ -19504,9 +18856,6 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v9, 16, v16
; GFX11-FAKE16-NEXT: .LBB51_3: ; %end
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB51_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB51_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -19791,7 +19140,7 @@ define inreg <5 x double> @bitcast_v20i16_to_v5f64_scalar(<20 x i16> inreg %a, i
; SI-NEXT: v_writelane_b32 v10, s50, 6
; SI-NEXT: s_cmp_lg_u32 s26, 0
; SI-NEXT: v_writelane_b32 v10, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB53_4
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s15, 16
@@ -19899,15 +19248,12 @@ define inreg <5 x double> @bitcast_v20i16_to_v5f64_scalar(<20 x i16> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB53_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB53_2
;
; VI-LABEL: bitcast_v20i16_to_v5f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB53_4
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB53_3
; VI-NEXT: .LBB53_2: ; %cmp.true
@@ -19973,16 +19319,14 @@ define inreg <5 x double> @bitcast_v20i16_to_v5f64_scalar(<20 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_4:
-; VI-NEXT: s_branch .LBB53_2
;
; GFX9-LABEL: bitcast_v20i16_to_v5f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB53_4
+; GFX9-NEXT: s_cbranch_execnz .LBB53_3
; GFX9-NEXT: .LBB53_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
@@ -19996,8 +19340,6 @@ define inreg <5 x double> @bitcast_v20i16_to_v5f64_scalar(<20 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB53_3:
-; GFX9-NEXT: s_branch .LBB53_2
-; GFX9-NEXT: .LBB53_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -20025,10 +19367,10 @@ define inreg <5 x double> @bitcast_v20i16_to_v5f64_scalar(<20 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-NEXT: .LBB53_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
@@ -20042,8 +19384,6 @@ define inreg <5 x double> @bitcast_v20i16_to_v5f64_scalar(<20 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB53_3:
-; GFX11-NEXT: s_branch .LBB53_2
-; GFX11-NEXT: .LBB53_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -20228,7 +19568,7 @@ define inreg <20 x i16> @bitcast_v5f64_to_v20i16_scalar(<5 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s40, s25, 16
; SI-NEXT: s_lshr_b32 s29, s23, 16
@@ -20240,7 +19580,7 @@ define inreg <20 x i16> @bitcast_v5f64_to_v20i16_scalar(<5 x double> inreg %a, i
; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB55_4
+; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; SI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -20257,20 +19597,8 @@ define inreg <20 x i16> @bitcast_v5f64_to_v20i16_scalar(<5 x double> inreg %a, i
; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v1
-; SI-NEXT: s_branch .LBB55_5
+; SI-NEXT: s_branch .LBB55_4
; SI-NEXT: .LBB55_3:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: s_branch .LBB55_2
-; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v5, s21
@@ -20291,7 +19619,7 @@ define inreg <20 x i16> @bitcast_v5f64_to_v20i16_scalar(<5 x double> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v12, s8
; SI-NEXT: v_mov_b32_e32 v11, s6
; SI-NEXT: v_mov_b32_e32 v10, s4
-; SI-NEXT: .LBB55_5: ; %end
+; SI-NEXT: .LBB55_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
@@ -20328,9 +19656,9 @@ define inreg <20 x i16> @bitcast_v5f64_to_v20i16_scalar(<5 x double> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_4
+; VI-NEXT: s_cbranch_execnz .LBB55_3
; VI-NEXT: .LBB55_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -20339,8 +19667,6 @@ define inreg <20 x i16> @bitcast_v5f64_to_v20i16_scalar(<5 x double> inreg %a, i
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB55_3:
-; VI-NEXT: s_branch .LBB55_2
-; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -20363,9 +19689,9 @@ define inreg <20 x i16> @bitcast_v5f64_to_v20i16_scalar(<5 x double> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
+; GFX9-NEXT: s_cbranch_execnz .LBB55_3
; GFX9-NEXT: .LBB55_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -20374,8 +19700,6 @@ define inreg <20 x i16> @bitcast_v5f64_to_v20i16_scalar(<5 x double> inreg %a, i
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
-; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -20403,10 +19727,10 @@ define inreg <20 x i16> @bitcast_v5f64_to_v20i16_scalar(<5 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-NEXT: .LBB55_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[6:7], s[18:19], 1.0
@@ -20415,8 +19739,6 @@ define inreg <20 x i16> @bitcast_v5f64_to_v20i16_scalar(<5 x double> inreg %a, i
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
-; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -20710,7 +20032,7 @@ define inreg <5 x i64> @bitcast_v20i16_to_v5i64_scalar(<20 x i16> inreg %a, i32
; SI-NEXT: v_writelane_b32 v10, s50, 6
; SI-NEXT: s_cmp_lg_u32 s26, 0
; SI-NEXT: v_writelane_b32 v10, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB57_4
+; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s15, 16
@@ -20818,15 +20140,12 @@ define inreg <5 x i64> @bitcast_v20i16_to_v5i64_scalar(<20 x i16> inreg %a, i32
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB57_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB57_2
;
; VI-LABEL: bitcast_v20i16_to_v5i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB57_3
; VI-NEXT: .LBB57_2: ; %cmp.true
@@ -20892,16 +20211,14 @@ define inreg <5 x i64> @bitcast_v20i16_to_v5i64_scalar(<20 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v20i16_to_v5i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
+; GFX9-NEXT: s_cbranch_execnz .LBB57_3
; GFX9-NEXT: .LBB57_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
@@ -20915,8 +20232,6 @@ define inreg <5 x i64> @bitcast_v20i16_to_v5i64_scalar(<20 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
-; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -20944,10 +20259,10 @@ define inreg <5 x i64> @bitcast_v20i16_to_v5i64_scalar(<20 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-NEXT: .LBB57_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
@@ -20961,8 +20276,6 @@ define inreg <5 x i64> @bitcast_v20i16_to_v5i64_scalar(<20 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB57_3:
-; GFX11-NEXT: s_branch .LBB57_2
-; GFX11-NEXT: .LBB57_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -21170,7 +20483,7 @@ define inreg <20 x i16> @bitcast_v5i64_to_v20i16_scalar(<5 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB59_4
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s26, s25, 16
; SI-NEXT: s_lshr_b32 s27, s23, 16
@@ -21246,24 +20559,12 @@ define inreg <20 x i16> @bitcast_v5i64_to_v20i16_scalar(<5 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v8, s4
; SI-NEXT: v_mov_b32_e32 v9, s13
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB59_4:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: s_branch .LBB59_2
;
; VI-LABEL: bitcast_v5i64_to_v20i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_4
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB59_3
; VI-NEXT: .LBB59_2: ; %cmp.true
@@ -21289,14 +20590,12 @@ define inreg <20 x i16> @bitcast_v5i64_to_v20i16_scalar(<5 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB59_4:
-; VI-NEXT: s_branch .LBB59_2
;
; GFX9-LABEL: bitcast_v5i64_to_v20i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB59_3
; GFX9-NEXT: .LBB59_2: ; %cmp.true
@@ -21322,15 +20621,13 @@ define inreg <20 x i16> @bitcast_v5i64_to_v20i16_scalar(<5 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB59_4:
-; GFX9-NEXT: s_branch .LBB59_2
;
; GFX11-LABEL: bitcast_v5i64_to_v20i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB59_3
@@ -21353,8 +20650,6 @@ define inreg <20 x i16> @bitcast_v5i64_to_v20i16_scalar(<5 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB59_4:
-; GFX11-NEXT: s_branch .LBB59_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -22414,7 +21709,7 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: s_lshr_b32 s90, s17, 16
; SI-NEXT: s_lshr_b32 s91, s16, 16
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB61_3
+; SI-NEXT: s_cbranch_scc0 .LBB61_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s91, 16
@@ -22471,7 +21766,7 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: s_lshr_b64 s[62:63], s[4:5], 24
; SI-NEXT: s_lshr_b64 s[74:75], s[4:5], 16
; SI-NEXT: s_lshr_b64 s[78:79], s[4:5], 8
-; SI-NEXT: s_cbranch_execnz .LBB61_4
+; SI-NEXT: s_cbranch_execnz .LBB61_3
; SI-NEXT: .LBB61_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v2, s24
; SI-NEXT: v_cvt_f32_f16_e32 v1, s35
@@ -22598,40 +21893,8 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: v_bfe_u32 v8, v3, 8, 8
; SI-NEXT: v_bfe_u32 v6, v2, 8, 8
; SI-NEXT: v_bfe_u32 v5, v1, 8, 8
-; SI-NEXT: s_branch .LBB61_5
+; SI-NEXT: s_branch .LBB61_4
; SI-NEXT: .LBB61_3:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: s_branch .LBB61_2
-; SI-NEXT: .LBB61_4:
; SI-NEXT: v_mov_b32_e32 v1, s34
; SI-NEXT: v_mov_b32_e32 v2, s30
; SI-NEXT: v_mov_b32_e32 v3, s94
@@ -22642,7 +21905,7 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v8, s57
; SI-NEXT: v_mov_b32_e32 v9, s47
; SI-NEXT: v_mov_b32_e32 v10, s45
-; SI-NEXT: .LBB61_5: ; %end
+; SI-NEXT: .LBB61_4: ; %end
; SI-NEXT: s_and_b32 s12, s12, 0xff
; SI-NEXT: s_lshl_b32 s16, s40, 8
; SI-NEXT: s_or_b32 s12, s12, s16
@@ -22775,7 +22038,7 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB61_3
+; VI-NEXT: s_cbranch_scc0 .LBB61_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s46, s25, 24
; VI-NEXT: s_lshr_b32 s59, s25, 16
@@ -22807,7 +22070,7 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; VI-NEXT: s_lshr_b64 s[8:9], s[20:21], 24
; VI-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB61_4
+; VI-NEXT: s_cbranch_execnz .LBB61_3
; VI-NEXT: .LBB61_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v1, 0x200
@@ -22880,40 +22143,8 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; VI-NEXT: v_bfe_u32 v32, v9, 8, 8
; VI-NEXT: v_bfe_u32 v35, v8, 8, 8
; VI-NEXT: v_bfe_u32 v38, v7, 8, 8
-; VI-NEXT: s_branch .LBB61_5
+; VI-NEXT: s_branch .LBB61_4
; VI-NEXT: .LBB61_3:
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr26
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr27
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr28
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr29
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB61_2
-; VI-NEXT: .LBB61_4:
; VI-NEXT: v_mov_b32_e32 v12, s76
; VI-NEXT: v_mov_b32_e32 v7, s75
; VI-NEXT: v_mov_b32_e32 v13, s74
@@ -22954,7 +22185,7 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v3, s8
; VI-NEXT: v_mov_b32_e32 v4, s6
; VI-NEXT: v_mov_b32_e32 v5, s4
-; VI-NEXT: .LBB61_5: ; %end
+; VI-NEXT: .LBB61_4: ; %end
; VI-NEXT: s_mov_b32 s4, 0xc0c0004
; VI-NEXT: v_perm_b32 v5, v12, v5, s4
; VI-NEXT: v_perm_b32 v12, v21, v48, s4
@@ -23022,7 +22253,7 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB61_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB61_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s26, s25, 24
; GFX9-NEXT: s_lshr_b32 s27, s25, 16
@@ -23054,7 +22285,7 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; GFX9-NEXT: s_lshr_b64 s[8:9], s[20:21], 24
; GFX9-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB61_4
+; GFX9-NEXT: s_cbranch_execnz .LBB61_3
; GFX9-NEXT: .LBB61_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v1, 0x200
; GFX9-NEXT: v_pk_add_f16 v10, s17, v1 op_sel_hi:[1,0]
@@ -23097,40 +22328,8 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 8, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 8, v9
-; GFX9-NEXT: s_branch .LBB61_5
+; GFX9-NEXT: s_branch .LBB61_4
; GFX9-NEXT: .LBB61_3:
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr28
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr29
-; GFX9-NEXT: ; implicit-def: $sgpr27
-; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: s_branch .LBB61_2
-; GFX9-NEXT: .LBB61_4:
; GFX9-NEXT: v_mov_b32_e32 v9, s16
; GFX9-NEXT: v_mov_b32_e32 v10, s17
; GFX9-NEXT: v_mov_b32_e32 v7, s18
@@ -23171,7 +22370,7 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v13, s8
; GFX9-NEXT: v_mov_b32_e32 v14, s6
; GFX9-NEXT: v_mov_b32_e32 v15, s4
-; GFX9-NEXT: .LBB61_5: ; %end
+; GFX9-NEXT: .LBB61_4: ; %end
; GFX9-NEXT: s_mov_b32 s4, 0xc0c0004
; GFX9-NEXT: v_perm_b32 v15, v48, v15, s4
; GFX9-NEXT: v_perm_b32 v9, v9, v39, s4
@@ -23231,7 +22430,7 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s14, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB61_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB61_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s15, s21, 24
; GFX11-NEXT: s_lshr_b32 s22, s21, 16
@@ -23264,7 +22463,7 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
-; GFX11-NEXT: s_cbranch_vccnz .LBB61_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB61_3
; GFX11-NEXT: .LBB61_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s21 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s20 op_sel_hi:[0,1]
@@ -23306,40 +22505,8 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v38, 8, v10
; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
; GFX11-NEXT: v_lshrrev_b32_e32 v39, 8, v9
-; GFX11-NEXT: s_branch .LBB61_5
+; GFX11-NEXT: s_branch .LBB61_4
; GFX11-NEXT: .LBB61_3:
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr29
-; GFX11-NEXT: ; implicit-def: $sgpr27
-; GFX11-NEXT: ; implicit-def: $sgpr26
-; GFX11-NEXT: ; implicit-def: $sgpr25
-; GFX11-NEXT: ; implicit-def: $sgpr23
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr22
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: s_branch .LBB61_2
-; GFX11-NEXT: .LBB61_4:
; GFX11-NEXT: v_dual_mov_b32 v9, s0 :: v_dual_mov_b32 v10, s1
; GFX11-NEXT: v_dual_mov_b32 v7, s2 :: v_dual_mov_b32 v8, s3
; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v6, s17
@@ -23360,7 +22527,7 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v11, s12
; GFX11-NEXT: v_dual_mov_b32 v12, s10 :: v_dual_mov_b32 v13, s8
; GFX11-NEXT: v_dual_mov_b32 v14, s6 :: v_dual_mov_b32 v15, s4
-; GFX11-NEXT: .LBB61_5: ; %end
+; GFX11-NEXT: .LBB61_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_perm_b32 v15, v48, v15, 0xc0c0004
; GFX11-NEXT: v_perm_b32 v9, v9, v39, 0xc0c0004
@@ -24804,7 +23971,7 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s59, v0
; SI-NEXT: v_writelane_b32 v27, s39, 7
-; SI-NEXT: s_cbranch_scc0 .LBB63_4
+; SI-NEXT: s_cbranch_scc0 .LBB63_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -25107,23 +24274,6 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB63_4:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: s_branch .LBB63_2
;
; VI-LABEL: bitcast_v40i8_to_v20f16_scalar:
; VI: ; %bb.0:
@@ -25156,7 +24306,7 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s10, v0
-; VI-NEXT: s_cbranch_scc0 .LBB63_4
+; VI-NEXT: s_cbranch_scc0 .LBB63_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v9, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -25324,9 +24474,6 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v9
; VI-NEXT: .LBB63_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB63_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB63_2
;
; GFX9-LABEL: bitcast_v40i8_to_v20f16_scalar:
; GFX9: ; %bb.0:
@@ -25359,7 +24506,7 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s41, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s42, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB63_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB63_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v0, s17
; GFX9-NEXT: v_mov_b32_e32 v8, 0xc0c0004
@@ -25530,9 +24677,6 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: v_lshl_or_b32 v9, v15, 16, v9
; GFX9-NEXT: .LBB63_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB63_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB63_2
;
; GFX11-TRUE16-LABEL: bitcast_v40i8_to_v20f16_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -25562,7 +24706,7 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s45, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s58, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s58, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB63_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB63_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v8, 0xc0c0004
; GFX11-TRUE16-NEXT: s_and_b32 s59, s43, 0xff
@@ -25679,9 +24823,6 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v10.l
; GFX11-TRUE16-NEXT: .LBB63_3: ; %end
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB63_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB63_2
;
; GFX11-FAKE16-LABEL: bitcast_v40i8_to_v20f16_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -25711,7 +24852,7 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s58, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s58, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB63_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB63_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v8, 0xc0c0004
; GFX11-FAKE16-NEXT: s_and_b32 s59, s56, 0xff
@@ -25846,9 +24987,6 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v9, 16, v16
; GFX11-FAKE16-NEXT: .LBB63_3: ; %end
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB63_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB63_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -26173,7 +25311,7 @@ define inreg <5 x double> @bitcast_v20f16_to_v5f64_scalar(<20 x half> inreg %a,
; SI-NEXT: v_writelane_b32 v16, s50, 6
; SI-NEXT: s_cmp_lg_u32 s26, 0
; SI-NEXT: v_writelane_b32 v16, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB65_3
+; SI-NEXT: s_cbranch_scc0 .LBB65_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s15, 16
@@ -26205,7 +25343,7 @@ define inreg <5 x double> @bitcast_v20f16_to_v5f64_scalar(<20 x half> inreg %a,
; SI-NEXT: s_and_b32 s4, s25, 0xffff
; SI-NEXT: s_lshl_b32 s5, s6, 16
; SI-NEXT: s_or_b32 s45, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB65_4
+; SI-NEXT: s_cbranch_execnz .LBB65_3
; SI-NEXT: .LBB65_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s15
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -26287,11 +25425,8 @@ define inreg <5 x double> @bitcast_v20f16_to_v5f64_scalar(<20 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v8, v9, v8
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v10
; SI-NEXT: v_or_b32_e32 v9, v11, v9
-; SI-NEXT: s_branch .LBB65_5
+; SI-NEXT: s_branch .LBB65_4
; SI-NEXT: .LBB65_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB65_2
-; SI-NEXT: .LBB65_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -26308,7 +25443,7 @@ define inreg <5 x double> @bitcast_v20f16_to_v5f64_scalar(<20 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v13, s49
; SI-NEXT: v_mov_b32_e32 v14, s50
; SI-NEXT: v_mov_b32_e32 v15, s51
-; SI-NEXT: .LBB65_5: ; %end
+; SI-NEXT: .LBB65_4: ; %end
; SI-NEXT: v_readlane_b32 s51, v16, 7
; SI-NEXT: v_readlane_b32 s50, v16, 6
; SI-NEXT: v_readlane_b32 s49, v16, 5
@@ -26327,9 +25462,9 @@ define inreg <5 x double> @bitcast_v20f16_to_v5f64_scalar(<20 x half> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB65_3
+; VI-NEXT: s_cbranch_scc0 .LBB65_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB65_4
+; VI-NEXT: s_cbranch_execnz .LBB65_3
; VI-NEXT: .LBB65_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s25, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -26384,8 +25519,6 @@ define inreg <5 x double> @bitcast_v20f16_to_v5f64_scalar(<20 x half> inreg %a,
; VI-NEXT: v_or_b32_e32 v0, v0, v10
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB65_3:
-; VI-NEXT: s_branch .LBB65_2
-; VI-NEXT: .LBB65_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -26408,9 +25541,9 @@ define inreg <5 x double> @bitcast_v20f16_to_v5f64_scalar(<20 x half> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB65_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB65_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB65_4
+; GFX9-NEXT: s_cbranch_execnz .LBB65_3
; GFX9-NEXT: .LBB65_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v9, s25, v0 op_sel_hi:[1,0]
@@ -26425,8 +25558,6 @@ define inreg <5 x double> @bitcast_v20f16_to_v5f64_scalar(<20 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB65_3:
-; GFX9-NEXT: s_branch .LBB65_2
-; GFX9-NEXT: .LBB65_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -26454,10 +25585,10 @@ define inreg <5 x double> @bitcast_v20f16_to_v5f64_scalar(<20 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB65_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB65_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB65_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB65_3
; GFX11-NEXT: .LBB65_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
@@ -26471,8 +25602,6 @@ define inreg <5 x double> @bitcast_v20f16_to_v5f64_scalar(<20 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB65_3:
-; GFX11-NEXT: s_branch .LBB65_2
-; GFX11-NEXT: .LBB65_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -26657,7 +25786,7 @@ define inreg <20 x half> @bitcast_v5f64_to_v20f16_scalar(<5 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB67_3
+; SI-NEXT: s_cbranch_scc0 .LBB67_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s40, s25, 16
; SI-NEXT: s_lshr_b32 s29, s23, 16
@@ -26669,7 +25798,7 @@ define inreg <20 x half> @bitcast_v5f64_to_v20f16_scalar(<5 x double> inreg %a,
; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB67_4
+; SI-NEXT: s_cbranch_execnz .LBB67_3
; SI-NEXT: .LBB67_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; SI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -26686,20 +25815,8 @@ define inreg <20 x half> @bitcast_v5f64_to_v20f16_scalar(<5 x double> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v1
-; SI-NEXT: s_branch .LBB67_5
+; SI-NEXT: s_branch .LBB67_4
; SI-NEXT: .LBB67_3:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: s_branch .LBB67_2
-; SI-NEXT: .LBB67_4:
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v5, s21
@@ -26720,7 +25837,7 @@ define inreg <20 x half> @bitcast_v5f64_to_v20f16_scalar(<5 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v12, s8
; SI-NEXT: v_mov_b32_e32 v11, s6
; SI-NEXT: v_mov_b32_e32 v10, s4
-; SI-NEXT: .LBB67_5: ; %end
+; SI-NEXT: .LBB67_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
@@ -26757,9 +25874,9 @@ define inreg <20 x half> @bitcast_v5f64_to_v20f16_scalar(<5 x double> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB67_3
+; VI-NEXT: s_cbranch_scc0 .LBB67_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB67_4
+; VI-NEXT: s_cbranch_execnz .LBB67_3
; VI-NEXT: .LBB67_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -26768,8 +25885,6 @@ define inreg <20 x half> @bitcast_v5f64_to_v20f16_scalar(<5 x double> inreg %a,
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB67_3:
-; VI-NEXT: s_branch .LBB67_2
-; VI-NEXT: .LBB67_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -26792,9 +25907,9 @@ define inreg <20 x half> @bitcast_v5f64_to_v20f16_scalar(<5 x double> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB67_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB67_4
+; GFX9-NEXT: s_cbranch_execnz .LBB67_3
; GFX9-NEXT: .LBB67_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -26803,8 +25918,6 @@ define inreg <20 x half> @bitcast_v5f64_to_v20f16_scalar(<5 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB67_3:
-; GFX9-NEXT: s_branch .LBB67_2
-; GFX9-NEXT: .LBB67_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -26832,10 +25945,10 @@ define inreg <20 x half> @bitcast_v5f64_to_v20f16_scalar(<5 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB67_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB67_3
; GFX11-NEXT: .LBB67_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[6:7], s[18:19], 1.0
@@ -26844,8 +25957,6 @@ define inreg <20 x half> @bitcast_v5f64_to_v20f16_scalar(<5 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB67_3:
-; GFX11-NEXT: s_branch .LBB67_2
-; GFX11-NEXT: .LBB67_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -27179,7 +26290,7 @@ define inreg <5 x i64> @bitcast_v20f16_to_v5i64_scalar(<20 x half> inreg %a, i32
; SI-NEXT: v_writelane_b32 v16, s50, 6
; SI-NEXT: s_cmp_lg_u32 s26, 0
; SI-NEXT: v_writelane_b32 v16, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB69_3
+; SI-NEXT: s_cbranch_scc0 .LBB69_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s15, 16
@@ -27211,7 +26322,7 @@ define inreg <5 x i64> @bitcast_v20f16_to_v5i64_scalar(<20 x half> inreg %a, i32
; SI-NEXT: s_and_b32 s4, s25, 0xffff
; SI-NEXT: s_lshl_b32 s5, s6, 16
; SI-NEXT: s_or_b32 s45, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB69_4
+; SI-NEXT: s_cbranch_execnz .LBB69_3
; SI-NEXT: .LBB69_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s15
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -27293,11 +26404,8 @@ define inreg <5 x i64> @bitcast_v20f16_to_v5i64_scalar(<20 x half> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v8, v9, v8
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v10
; SI-NEXT: v_or_b32_e32 v9, v11, v9
-; SI-NEXT: s_branch .LBB69_5
+; SI-NEXT: s_branch .LBB69_4
; SI-NEXT: .LBB69_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB69_2
-; SI-NEXT: .LBB69_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -27314,7 +26422,7 @@ define inreg <5 x i64> @bitcast_v20f16_to_v5i64_scalar(<20 x half> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v13, s49
; SI-NEXT: v_mov_b32_e32 v14, s50
; SI-NEXT: v_mov_b32_e32 v15, s51
-; SI-NEXT: .LBB69_5: ; %end
+; SI-NEXT: .LBB69_4: ; %end
; SI-NEXT: v_readlane_b32 s51, v16, 7
; SI-NEXT: v_readlane_b32 s50, v16, 6
; SI-NEXT: v_readlane_b32 s49, v16, 5
@@ -27333,9 +26441,9 @@ define inreg <5 x i64> @bitcast_v20f16_to_v5i64_scalar(<20 x half> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB69_3
+; VI-NEXT: s_cbranch_scc0 .LBB69_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB69_4
+; VI-NEXT: s_cbranch_execnz .LBB69_3
; VI-NEXT: .LBB69_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s25, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -27390,8 +26498,6 @@ define inreg <5 x i64> @bitcast_v20f16_to_v5i64_scalar(<20 x half> inreg %a, i32
; VI-NEXT: v_or_b32_e32 v0, v0, v10
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB69_3:
-; VI-NEXT: s_branch .LBB69_2
-; VI-NEXT: .LBB69_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -27414,9 +26520,9 @@ define inreg <5 x i64> @bitcast_v20f16_to_v5i64_scalar(<20 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB69_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB69_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB69_4
+; GFX9-NEXT: s_cbranch_execnz .LBB69_3
; GFX9-NEXT: .LBB69_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v9, s25, v0 op_sel_hi:[1,0]
@@ -27431,8 +26537,6 @@ define inreg <5 x i64> @bitcast_v20f16_to_v5i64_scalar(<20 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB69_3:
-; GFX9-NEXT: s_branch .LBB69_2
-; GFX9-NEXT: .LBB69_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -27460,10 +26564,10 @@ define inreg <5 x i64> @bitcast_v20f16_to_v5i64_scalar(<20 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB69_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB69_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB69_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB69_3
; GFX11-NEXT: .LBB69_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
@@ -27477,8 +26581,6 @@ define inreg <5 x i64> @bitcast_v20f16_to_v5i64_scalar(<20 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB69_3:
-; GFX11-NEXT: s_branch .LBB69_2
-; GFX11-NEXT: .LBB69_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -27686,7 +26788,7 @@ define inreg <20 x half> @bitcast_v5i64_to_v20f16_scalar(<5 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB71_4
+; SI-NEXT: s_cbranch_scc0 .LBB71_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s26, s25, 16
; SI-NEXT: s_lshr_b32 s27, s23, 16
@@ -27762,24 +26864,12 @@ define inreg <20 x half> @bitcast_v5i64_to_v20f16_scalar(<5 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v8, s4
; SI-NEXT: v_mov_b32_e32 v9, s13
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB71_4:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: s_branch .LBB71_2
;
; VI-LABEL: bitcast_v5i64_to_v20f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB71_4
+; VI-NEXT: s_cbranch_scc0 .LBB71_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB71_3
; VI-NEXT: .LBB71_2: ; %cmp.true
@@ -27805,14 +26895,12 @@ define inreg <20 x half> @bitcast_v5i64_to_v20f16_scalar(<5 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB71_4:
-; VI-NEXT: s_branch .LBB71_2
;
; GFX9-LABEL: bitcast_v5i64_to_v20f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB71_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB71_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB71_3
; GFX9-NEXT: .LBB71_2: ; %cmp.true
@@ -27838,15 +26926,13 @@ define inreg <20 x half> @bitcast_v5i64_to_v20f16_scalar(<5 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB71_4:
-; GFX9-NEXT: s_branch .LBB71_2
;
; GFX11-LABEL: bitcast_v5i64_to_v20f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB71_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB71_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB71_3
@@ -27869,8 +26955,6 @@ define inreg <20 x half> @bitcast_v5i64_to_v20f16_scalar(<5 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB71_4:
-; GFX11-NEXT: s_branch .LBB71_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -29184,7 +28268,7 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s79, v0
; SI-NEXT: v_writelane_b32 v27, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB73_4
+; SI-NEXT: s_cbranch_scc0 .LBB73_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -29432,9 +28516,6 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB73_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB73_2
;
; VI-LABEL: bitcast_v40i8_to_v5f64_scalar:
; VI: ; %bb.0:
@@ -29467,7 +28548,7 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s62, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s63, v0
-; VI-NEXT: s_cbranch_scc0 .LBB73_4
+; VI-NEXT: s_cbranch_scc0 .LBB73_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v9, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -29635,9 +28716,6 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v9
; VI-NEXT: .LBB73_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB73_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB73_2
;
; GFX9-LABEL: bitcast_v40i8_to_v5f64_scalar:
; GFX9: ; %bb.0:
@@ -29670,7 +28748,7 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s62, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s63, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB73_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB73_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v9, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -29829,9 +28907,6 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: v_or_b32_sdwa v9, v10, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB73_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB73_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB73_2
;
; GFX11-LABEL: bitcast_v40i8_to_v5f64_scalar:
; GFX11: ; %bb.0:
@@ -29861,7 +28936,7 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: v_readfirstlane_b32 s57, v0
; GFX11-NEXT: s_cmp_lg_u32 s58, 0
; GFX11-NEXT: s_mov_b32 s58, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB73_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB73_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v5, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -30001,9 +29076,6 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: v_or_b32_e32 v9, v14, v15
; GFX11-NEXT: .LBB73_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB73_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-NEXT: s_branch .LBB73_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -30871,7 +29943,7 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB75_3
+; SI-NEXT: s_cbranch_scc0 .LBB75_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s72, s25, 24
; SI-NEXT: s_lshr_b32 s73, s25, 16
@@ -30903,7 +29975,7 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 24
; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 16
; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 8
-; SI-NEXT: s_cbranch_execnz .LBB75_4
+; SI-NEXT: s_cbranch_execnz .LBB75_3
; SI-NEXT: .LBB75_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
; SI-NEXT: v_add_f64 v[15:16], s[18:19], 1.0
@@ -30945,40 +30017,8 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; SI-NEXT: s_lshr_b32 s92, s17, 24
; SI-NEXT: s_lshr_b32 s93, s17, 16
; SI-NEXT: s_lshr_b32 s94, s17, 8
-; SI-NEXT: s_branch .LBB75_5
+; SI-NEXT: s_branch .LBB75_4
; SI-NEXT: .LBB75_3:
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: s_branch .LBB75_2
-; SI-NEXT: .LBB75_4:
; SI-NEXT: v_mov_b32_e32 v20, s16
; SI-NEXT: v_mov_b32_e32 v15, s18
; SI-NEXT: v_mov_b32_e32 v8, s20
@@ -30999,7 +30039,7 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v10, s6
; SI-NEXT: v_mov_b32_e32 v6, s4
; SI-NEXT: v_mov_b32_e32 v5, s10
-; SI-NEXT: .LBB75_5: ; %end
+; SI-NEXT: .LBB75_4: ; %end
; SI-NEXT: v_and_b32_e32 v2, 0xff, v20
; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v28
; SI-NEXT: s_and_b32 s4, s17, 0xff
@@ -31130,7 +30170,7 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB75_3
+; VI-NEXT: s_cbranch_scc0 .LBB75_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s58, s25, 24
; VI-NEXT: s_lshr_b32 s57, s25, 16
@@ -31162,7 +30202,7 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; VI-NEXT: s_lshr_b64 s[8:9], s[20:21], 24
; VI-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB75_4
+; VI-NEXT: s_cbranch_execnz .LBB75_3
; VI-NEXT: .LBB75_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[1:2], s[24:25], 1.0
; VI-NEXT: v_add_f64 v[3:4], s[22:23], 1.0
@@ -31199,40 +30239,8 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; VI-NEXT: v_lshrrev_b32_e32 v48, 8, v10
; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v9
; VI-NEXT: v_lshrrev_b32_e32 v36, 8, v9
-; VI-NEXT: s_branch .LBB75_5
+; VI-NEXT: s_branch .LBB75_4
; VI-NEXT: .LBB75_3:
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr26
-; VI-NEXT: ; implicit-def: $sgpr27
-; VI-NEXT: ; implicit-def: $sgpr28
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr29
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: s_branch .LBB75_2
-; VI-NEXT: .LBB75_4:
; VI-NEXT: v_mov_b32_e32 v9, s16
; VI-NEXT: v_mov_b32_e32 v7, s18
; VI-NEXT: v_mov_b32_e32 v5, s20
@@ -31273,7 +30281,7 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v13, s8
; VI-NEXT: v_mov_b32_e32 v12, s10
; VI-NEXT: v_mov_b32_e32 v11, s12
-; VI-NEXT: .LBB75_5: ; %end
+; VI-NEXT: .LBB75_4: ; %end
; VI-NEXT: s_mov_b32 s4, 0xc0c0004
; VI-NEXT: v_perm_b32 v15, v34, v15, s4
; VI-NEXT: v_perm_b32 v9, v9, v36, s4
@@ -31341,7 +30349,7 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB75_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB75_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s58, s25, 24
; GFX9-NEXT: s_lshr_b32 s57, s25, 16
@@ -31373,7 +30381,7 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; GFX9-NEXT: s_lshr_b64 s[8:9], s[20:21], 24
; GFX9-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB75_4
+; GFX9-NEXT: s_cbranch_execnz .LBB75_3
; GFX9-NEXT: .LBB75_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[1:2], s[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[3:4], s[22:23], 1.0
@@ -31410,40 +30418,8 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 8, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 8, v9
-; GFX9-NEXT: s_branch .LBB75_5
+; GFX9-NEXT: s_branch .LBB75_4
; GFX9-NEXT: .LBB75_3:
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: ; implicit-def: $sgpr27
-; GFX9-NEXT: ; implicit-def: $sgpr28
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr29
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: s_branch .LBB75_2
-; GFX9-NEXT: .LBB75_4:
; GFX9-NEXT: v_mov_b32_e32 v9, s16
; GFX9-NEXT: v_mov_b32_e32 v7, s18
; GFX9-NEXT: v_mov_b32_e32 v5, s20
@@ -31484,7 +30460,7 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v13, s8
; GFX9-NEXT: v_mov_b32_e32 v12, s10
; GFX9-NEXT: v_mov_b32_e32 v11, s12
-; GFX9-NEXT: .LBB75_5: ; %end
+; GFX9-NEXT: .LBB75_4: ; %end
; GFX9-NEXT: s_mov_b32 s4, 0xc0c0004
; GFX9-NEXT: v_perm_b32 v15, v33, v15, s4
; GFX9-NEXT: v_perm_b32 v9, v9, v35, s4
@@ -31544,7 +30520,7 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s14, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB75_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB75_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s45, s21, 24
; GFX11-NEXT: s_lshr_b32 s44, s21, 16
@@ -31577,7 +30553,7 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
-; GFX11-NEXT: s_cbranch_vccnz .LBB75_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB75_3
; GFX11-NEXT: .LBB75_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[5:6], s[16:17], 1.0
; GFX11-NEXT: v_add_f64 v[1:2], s[20:21], 1.0
@@ -31614,40 +30590,8 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v48, 8, v13
; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v12
; GFX11-NEXT: v_lshrrev_b32_e32 v35, 8, v12
-; GFX11-NEXT: s_branch .LBB75_5
+; GFX11-NEXT: s_branch .LBB75_4
; GFX11-NEXT: .LBB75_3:
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr22
-; GFX11-NEXT: ; implicit-def: $sgpr23
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr25
-; GFX11-NEXT: ; implicit-def: $sgpr26
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr27
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr29
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: s_branch .LBB75_2
-; GFX11-NEXT: .LBB75_4:
; GFX11-NEXT: v_dual_mov_b32 v12, s0 :: v_dual_mov_b32 v7, s2
; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v2, s21
; GFX11-NEXT: v_dual_mov_b32 v3, s18 :: v_dual_mov_b32 v4, s19
@@ -31668,7 +30612,7 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v36, s26 :: v_dual_mov_b32 v37, s25
; GFX11-NEXT: v_dual_mov_b32 v33, s24 :: v_dual_mov_b32 v38, s23
; GFX11-NEXT: v_dual_mov_b32 v39, s22 :: v_dual_mov_b32 v48, s15
-; GFX11-NEXT: .LBB75_5: ; %end
+; GFX11-NEXT: .LBB75_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_perm_b32 v16, v34, v16, 0xc0c0004
; GFX11-NEXT: v_perm_b32 v12, v12, v35, 0xc0c0004
@@ -33028,7 +31972,7 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s79, v0
; SI-NEXT: v_writelane_b32 v27, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB77_4
+; SI-NEXT: s_cbranch_scc0 .LBB77_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -33276,9 +32220,6 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB77_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB77_2
;
; VI-LABEL: bitcast_v40i8_to_v5i64_scalar:
; VI: ; %bb.0:
@@ -33311,7 +32252,7 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; VI-NEXT: v_readfirstlane_b32 s62, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s63, v0
-; VI-NEXT: s_cbranch_scc0 .LBB77_4
+; VI-NEXT: s_cbranch_scc0 .LBB77_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v9, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -33479,9 +32420,6 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v9
; VI-NEXT: .LBB77_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB77_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB77_2
;
; GFX9-LABEL: bitcast_v40i8_to_v5i64_scalar:
; GFX9: ; %bb.0:
@@ -33514,7 +32452,7 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; GFX9-NEXT: v_readfirstlane_b32 s62, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s63, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB77_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB77_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v9, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -33673,9 +32611,6 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; GFX9-NEXT: v_or_b32_sdwa v9, v10, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB77_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB77_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB77_2
;
; GFX11-LABEL: bitcast_v40i8_to_v5i64_scalar:
; GFX11: ; %bb.0:
@@ -33705,7 +32640,7 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; GFX11-NEXT: v_readfirstlane_b32 s57, v0
; GFX11-NEXT: s_cmp_lg_u32 s58, 0
; GFX11-NEXT: s_mov_b32 s58, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB77_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB77_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v5, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -33845,9 +32780,6 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; GFX11-NEXT: v_or_b32_e32 v9, v14, v15
; GFX11-NEXT: .LBB77_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB77_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-NEXT: s_branch .LBB77_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -34746,7 +33678,7 @@ define inreg <40 x i8> @bitcast_v5i64_to_v40i8_scalar(<5 x i64> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB79_4
+; SI-NEXT: s_cbranch_scc0 .LBB79_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s72, s25, 24
; SI-NEXT: s_lshr_b32 s73, s25, 16
@@ -34950,44 +33882,12 @@ define inreg <40 x i8> @bitcast_v5i64_to_v40i8_scalar(<5 x i64> inreg %a, i32 in
; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB79_4:
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: s_branch .LBB79_2
;
; VI-LABEL: bitcast_v5i64_to_v40i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB79_4
+; VI-NEXT: s_cbranch_scc0 .LBB79_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s26, s25, 24
; VI-NEXT: s_lshr_b32 s27, s25, 16
@@ -35144,44 +34044,12 @@ define inreg <40 x i8> @bitcast_v5i64_to_v40i8_scalar(<5 x i64> inreg %a, i32 in
; VI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB79_4:
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr29
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr28
-; VI-NEXT: ; implicit-def: $sgpr27
-; VI-NEXT: ; implicit-def: $sgpr26
-; VI-NEXT: s_branch .LBB79_2
;
; GFX9-LABEL: bitcast_v5i64_to_v40i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB79_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB79_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s26, s25, 24
; GFX9-NEXT: s_lshr_b32 s27, s25, 16
@@ -35329,45 +34197,13 @@ define inreg <40 x i8> @bitcast_v5i64_to_v40i8_scalar(<5 x i64> inreg %a, i32 in
; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:36
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB79_4:
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr29
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr28
-; GFX9-NEXT: ; implicit-def: $sgpr27
-; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: s_branch .LBB79_2
;
; GFX11-LABEL: bitcast_v5i64_to_v40i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s63, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB79_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB79_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s14, s21, 24
; GFX11-NEXT: s_lshr_b32 s22, s21, 16
@@ -35490,38 +34326,6 @@ define inreg <40 x i8> @bitcast_v5i64_to_v40i8_scalar(<5 x i64> inreg %a, i32 in
; GFX11-NEXT: scratch_store_b128 v0, v[5:8], off offset:16
; GFX11-NEXT: scratch_store_b64 v0, v[9:10], off offset:32
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB79_4:
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr29
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr25
-; GFX11-NEXT: ; implicit-def: $sgpr27
-; GFX11-NEXT: ; implicit-def: $sgpr26
-; GFX11-NEXT: ; implicit-def: $sgpr23
-; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr22
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: s_branch .LBB79_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -35634,9 +34438,9 @@ define inreg <5 x i64> @bitcast_v5f64_to_v5i64_scalar(<5 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB81_3
+; SI-NEXT: s_cbranch_scc0 .LBB81_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB81_4
+; SI-NEXT: s_cbranch_execnz .LBB81_3
; SI-NEXT: .LBB81_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
@@ -35645,8 +34449,6 @@ define inreg <5 x i64> @bitcast_v5f64_to_v5i64_scalar(<5 x double> inreg %a, i32
; SI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB81_3:
-; SI-NEXT: s_branch .LBB81_2
-; SI-NEXT: .LBB81_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -35669,9 +34471,9 @@ define inreg <5 x i64> @bitcast_v5f64_to_v5i64_scalar(<5 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB81_3
+; VI-NEXT: s_cbranch_scc0 .LBB81_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB81_4
+; VI-NEXT: s_cbranch_execnz .LBB81_3
; VI-NEXT: .LBB81_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
@@ -35680,8 +34482,6 @@ define inreg <5 x i64> @bitcast_v5f64_to_v5i64_scalar(<5 x double> inreg %a, i32
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB81_3:
-; VI-NEXT: s_branch .LBB81_2
-; VI-NEXT: .LBB81_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -35704,9 +34504,9 @@ define inreg <5 x i64> @bitcast_v5f64_to_v5i64_scalar(<5 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB81_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB81_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB81_4
+; GFX9-NEXT: s_cbranch_execnz .LBB81_3
; GFX9-NEXT: .LBB81_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
@@ -35715,8 +34515,6 @@ define inreg <5 x i64> @bitcast_v5f64_to_v5i64_scalar(<5 x double> inreg %a, i32
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB81_3:
-; GFX9-NEXT: s_branch .LBB81_2
-; GFX9-NEXT: .LBB81_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -35744,10 +34542,10 @@ define inreg <5 x i64> @bitcast_v5f64_to_v5i64_scalar(<5 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB81_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB81_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB81_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB81_3
; GFX11-NEXT: .LBB81_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
@@ -35756,8 +34554,6 @@ define inreg <5 x i64> @bitcast_v5f64_to_v5i64_scalar(<5 x double> inreg %a, i32
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB81_3:
-; GFX11-NEXT: s_branch .LBB81_2
-; GFX11-NEXT: .LBB81_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -35902,7 +34698,7 @@ define inreg <5 x double> @bitcast_v5i64_to_v5f64_scalar(<5 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB83_4
+; SI-NEXT: s_cbranch_scc0 .LBB83_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB83_3
; SI-NEXT: .LBB83_2: ; %cmp.true
@@ -35928,14 +34724,12 @@ define inreg <5 x double> @bitcast_v5i64_to_v5f64_scalar(<5 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB83_4:
-; SI-NEXT: s_branch .LBB83_2
;
; VI-LABEL: bitcast_v5i64_to_v5f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB83_4
+; VI-NEXT: s_cbranch_scc0 .LBB83_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB83_3
; VI-NEXT: .LBB83_2: ; %cmp.true
@@ -35961,14 +34755,12 @@ define inreg <5 x double> @bitcast_v5i64_to_v5f64_scalar(<5 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB83_4:
-; VI-NEXT: s_branch .LBB83_2
;
; GFX9-LABEL: bitcast_v5i64_to_v5f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB83_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB83_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB83_3
; GFX9-NEXT: .LBB83_2: ; %cmp.true
@@ -35994,15 +34786,13 @@ define inreg <5 x double> @bitcast_v5i64_to_v5f64_scalar(<5 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB83_4:
-; GFX9-NEXT: s_branch .LBB83_2
;
; GFX11-LABEL: bitcast_v5i64_to_v5f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB83_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB83_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB83_3
@@ -36024,8 +34814,6 @@ define inreg <5 x double> @bitcast_v5i64_to_v5f64_scalar(<5 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB83_4:
-; GFX11-NEXT: s_branch .LBB83_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll
index 0a8af1ab3e547..6091e4c8950d1 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll
@@ -81,7 +81,7 @@ define inreg float @bitcast_i32_to_f32_scalar(i32 inreg %a, i32 inreg %b) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -89,14 +89,12 @@ define inreg float @bitcast_i32_to_f32_scalar(i32 inreg %a, i32 inreg %b) {
; SI-NEXT: .LBB1_3: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_i32_to_f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -104,14 +102,12 @@ define inreg float @bitcast_i32_to_f32_scalar(i32 inreg %a, i32 inreg %b) {
; VI-NEXT: .LBB1_3: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_i32_to_f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -119,8 +115,6 @@ define inreg float @bitcast_i32_to_f32_scalar(i32 inreg %a, i32 inreg %b) {
; GFX9-NEXT: .LBB1_3: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_i32_to_f32_scalar:
; GFX11: ; %bb.0:
@@ -232,15 +226,13 @@ define inreg i32 @bitcast_f32_to_i32_scalar(float inreg %a, i32 inreg %b) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -248,15 +240,13 @@ define inreg i32 @bitcast_f32_to_i32_scalar(float inreg %a, i32 inreg %b) {
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -264,15 +254,13 @@ define inreg i32 @bitcast_f32_to_i32_scalar(float inreg %a, i32 inreg %b) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -281,16 +269,14 @@ define inreg i32 @bitcast_f32_to_i32_scalar(float inreg %a, i32 inreg %b) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -393,7 +379,7 @@ define inreg <2 x i16> @bitcast_i32_to_v2i16_scalar(i32 inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cbranch_execnz .LBB5_3
@@ -406,15 +392,12 @@ define inreg <2 x i16> @bitcast_i32_to_v2i16_scalar(i32 inreg %a, i32 inreg %b)
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_i32_to_v2i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -422,14 +405,12 @@ define inreg <2 x i16> @bitcast_i32_to_v2i16_scalar(i32 inreg %a, i32 inreg %b)
; VI-NEXT: .LBB5_3: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_i32_to_v2i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -437,8 +418,6 @@ define inreg <2 x i16> @bitcast_i32_to_v2i16_scalar(i32 inreg %a, i32 inreg %b)
; GFX9-NEXT: .LBB5_3: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_i32_to_v2i16_scalar:
; GFX11: ; %bb.0:
@@ -573,7 +552,7 @@ define inreg i32 @bitcast_v2i16_to_i32_scalar(<2 x i16> inreg %a, i32 inreg %b)
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s6, 16
@@ -588,15 +567,12 @@ define inreg i32 @bitcast_v2i16_to_i32_scalar(<2 x i16> inreg %a, i32 inreg %b)
; SI-NEXT: .LBB7_3: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s7
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v2i16_to_i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -608,22 +584,18 @@ define inreg i32 @bitcast_v2i16_to_i32_scalar(<2 x i16> inreg %a, i32 inreg %b)
; VI-NEXT: .LBB7_3: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v2i16_to_i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_4
+; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB7_3:
-; GFX9-NEXT: s_branch .LBB7_2
-; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -632,16 +604,14 @@ define inreg i32 @bitcast_v2i16_to_i32_scalar(<2 x i16> inreg %a, i32 inreg %b)
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
; GFX11-NEXT: .LBB7_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: s_branch .LBB7_2
-; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -744,7 +714,7 @@ define inreg <2 x half> @bitcast_i32_to_v2f16_scalar(i32 inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cbranch_execnz .LBB9_3
@@ -757,15 +727,12 @@ define inreg <2 x half> @bitcast_i32_to_v2f16_scalar(i32 inreg %a, i32 inreg %b)
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_i32_to_v2f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -773,14 +740,12 @@ define inreg <2 x half> @bitcast_i32_to_v2f16_scalar(i32 inreg %a, i32 inreg %b)
; VI-NEXT: .LBB9_3: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_i32_to_v2f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -788,8 +753,6 @@ define inreg <2 x half> @bitcast_i32_to_v2f16_scalar(i32 inreg %a, i32 inreg %b)
; GFX9-NEXT: .LBB9_3: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_i32_to_v2f16_scalar:
; GFX11: ; %bb.0:
@@ -924,12 +887,12 @@ define inreg i32 @bitcast_v2f16_to_i32_scalar(<2 x half> inreg %a, i32 inreg %b)
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s6, 16
; SI-NEXT: s_or_b32 s7, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s6
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -941,9 +904,6 @@ define inreg i32 @bitcast_v2f16_to_i32_scalar(<2 x half> inreg %a, i32 inreg %b)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_3:
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s7
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -951,9 +911,9 @@ define inreg i32 @bitcast_v2f16_to_i32_scalar(<2 x half> inreg %a, i32 inreg %b)
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -963,8 +923,6 @@ define inreg i32 @bitcast_v2f16_to_i32_scalar(<2 x half> inreg %a, i32 inreg %b)
; VI-NEXT: v_or_b32_e32 v0, v0, v1
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -972,16 +930,14 @@ define inreg i32 @bitcast_v2f16_to_i32_scalar(<2 x half> inreg %a, i32 inreg %b)
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -990,16 +946,14 @@ define inreg i32 @bitcast_v2f16_to_i32_scalar(<2 x half> inreg %a, i32 inreg %b)
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -1107,7 +1061,7 @@ define inreg <2 x bfloat> @bitcast_i32_to_v2bf16_scalar(i32 inreg %a, i32 inreg
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB13_4
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s7, s16, 0xffff0000
; SI-NEXT: s_lshl_b32 s6, s16, 16
@@ -1122,16 +1076,12 @@ define inreg <2 x bfloat> @bitcast_i32_to_v2bf16_scalar(i32 inreg %a, i32 inreg
; SI-NEXT: v_mul_f32_e64 v0, 1.0, s6
; SI-NEXT: v_lshr_b64 v[0:1], v[0:1], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB13_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB13_2
;
; VI-LABEL: bitcast_i32_to_v2bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
@@ -1139,14 +1089,12 @@ define inreg <2 x bfloat> @bitcast_i32_to_v2bf16_scalar(i32 inreg %a, i32 inreg
; VI-NEXT: .LBB13_3: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_i32_to_v2bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
@@ -1154,8 +1102,6 @@ define inreg <2 x bfloat> @bitcast_i32_to_v2bf16_scalar(i32 inreg %a, i32 inreg
; GFX9-NEXT: .LBB13_3: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_i32_to_v2bf16_scalar:
; GFX11: ; %bb.0:
@@ -1385,7 +1331,7 @@ define inreg i32 @bitcast_v2bf16_to_i32_scalar(<2 x bfloat> inreg %a, i32 inreg
; SI-NEXT: s_cmp_lg_u32 s17, 0
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v4
; SI-NEXT: v_lshr_b64 v[0:1], v[2:3], 16
@@ -1399,17 +1345,14 @@ define inreg i32 @bitcast_v2bf16_to_i32_scalar(<2 x bfloat> inreg %a, i32 inreg
; SI-NEXT: v_lshr_b64 v[0:1], v[0:1], 16
; SI-NEXT: .LBB15_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v2bf16_to_i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_3
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB15_4
+; VI-NEXT: s_cbranch_execnz .LBB15_3
; VI-NEXT: .LBB15_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, 0x40c00000
@@ -1432,8 +1375,6 @@ define inreg i32 @bitcast_v2bf16_to_i32_scalar(<2 x bfloat> inreg %a, i32 inreg
; VI-NEXT: v_lshrrev_b64 v[0:1], 16, v[0:1]
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB15_3:
-; VI-NEXT: s_branch .LBB15_2
-; VI-NEXT: .LBB15_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -1441,9 +1382,9 @@ define inreg i32 @bitcast_v2bf16_to_i32_scalar(<2 x bfloat> inreg %a, i32 inreg
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s16, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -1468,8 +1409,6 @@ define inreg i32 @bitcast_v2bf16_to_i32_scalar(<2 x bfloat> inreg %a, i32 inreg
; GFX9-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -1478,10 +1417,10 @@ define inreg i32 @bitcast_v2bf16_to_i32_scalar(<2 x bfloat> inreg %a, i32 inreg
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -1509,8 +1448,6 @@ define inreg i32 @bitcast_v2bf16_to_i32_scalar(<2 x bfloat> inreg %a, i32 inreg
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB15_2
-; GFX11-TRUE16-NEXT: .LBB15_4:
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -1519,10 +1456,10 @@ define inreg i32 @bitcast_v2bf16_to_i32_scalar(<2 x bfloat> inreg %a, i32 inreg
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-FAKE16-NEXT: .LBB15_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
@@ -1550,8 +1487,6 @@ define inreg i32 @bitcast_v2bf16_to_i32_scalar(<2 x bfloat> inreg %a, i32 inreg
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB15_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB15_2
-; GFX11-FAKE16-NEXT: .LBB15_4:
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -1646,7 +1581,7 @@ define inreg <1 x i32> @bitcast_i32_to_v1i32_scalar(i32 inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB17_3
; SI-NEXT: .LBB17_2: ; %cmp.true
@@ -1654,14 +1589,12 @@ define inreg <1 x i32> @bitcast_i32_to_v1i32_scalar(i32 inreg %a, i32 inreg %b)
; SI-NEXT: .LBB17_3: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_i32_to_v1i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
@@ -1669,14 +1602,12 @@ define inreg <1 x i32> @bitcast_i32_to_v1i32_scalar(i32 inreg %a, i32 inreg %b)
; VI-NEXT: .LBB17_3: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_i32_to_v1i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
@@ -1684,8 +1615,6 @@ define inreg <1 x i32> @bitcast_i32_to_v1i32_scalar(i32 inreg %a, i32 inreg %b)
; GFX9-NEXT: .LBB17_3: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_i32_to_v1i32_scalar:
; GFX11: ; %bb.0:
@@ -1797,7 +1726,7 @@ define inreg i32 @bitcast_v1i32_to_i32_scalar(<1 x i32> inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB19_4
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
@@ -1805,14 +1734,12 @@ define inreg i32 @bitcast_v1i32_to_i32_scalar(<1 x i32> inreg %a, i32 inreg %b)
; SI-NEXT: .LBB19_3: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB19_4:
-; SI-NEXT: s_branch .LBB19_2
;
; VI-LABEL: bitcast_v1i32_to_i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_4
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
@@ -1820,14 +1747,12 @@ define inreg i32 @bitcast_v1i32_to_i32_scalar(<1 x i32> inreg %a, i32 inreg %b)
; VI-NEXT: .LBB19_3: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB19_4:
-; VI-NEXT: s_branch .LBB19_2
;
; GFX9-LABEL: bitcast_v1i32_to_i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
@@ -1835,8 +1760,6 @@ define inreg i32 @bitcast_v1i32_to_i32_scalar(<1 x i32> inreg %a, i32 inreg %b)
; GFX9-NEXT: .LBB19_3: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB19_4:
-; GFX9-NEXT: s_branch .LBB19_2
;
; GFX11-LABEL: bitcast_v1i32_to_i32_scalar:
; GFX11: ; %bb.0:
@@ -2044,7 +1967,7 @@ define inreg <4 x i8> @bitcast_i32_to_v4i8_scalar(i32 inreg %a, i32 inreg %b) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s6, s16, 24
; SI-NEXT: s_lshr_b32 s7, s16, 16
@@ -2061,17 +1984,12 @@ define inreg <4 x i8> @bitcast_i32_to_v4i8_scalar(i32 inreg %a, i32 inreg %b) {
; SI-NEXT: v_mov_b32_e32 v2, s7
; SI-NEXT: v_mov_b32_e32 v3, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_i32_to_v4i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s6, s16, 24
; VI-NEXT: s_lshr_b32 s7, s16, 16
@@ -2088,17 +2006,12 @@ define inreg <4 x i8> @bitcast_i32_to_v4i8_scalar(i32 inreg %a, i32 inreg %b) {
; VI-NEXT: v_mov_b32_e32 v2, s7
; VI-NEXT: v_mov_b32_e32 v3, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr7
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_i32_to_v4i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s6, s16, 24
; GFX9-NEXT: s_lshr_b32 s7, s16, 16
@@ -2115,18 +2028,13 @@ define inreg <4 x i8> @bitcast_i32_to_v4i8_scalar(i32 inreg %a, i32 inreg %b) {
; GFX9-NEXT: v_mov_b32_e32 v2, s7
; GFX9-NEXT: v_mov_b32_e32 v3, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr7
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_i32_to_v4i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s1, s0, 24
; GFX11-NEXT: s_lshr_b32 s2, s0, 16
@@ -2144,11 +2052,6 @@ define inreg <4 x i8> @bitcast_i32_to_v4i8_scalar(i32 inreg %a, i32 inreg %b) {
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s3
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_4:
-; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: ; implicit-def: $sgpr2
-; GFX11-NEXT: ; implicit-def: $sgpr1
-; GFX11-NEXT: s_branch .LBB21_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2406,7 +2309,7 @@ define inreg i32 @bitcast_v4i8_to_i32_scalar(<4 x i8> inreg %a, i32 inreg %b) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -2435,15 +2338,12 @@ define inreg i32 @bitcast_v4i8_to_i32_scalar(<4 x i8> inreg %a, i32 inreg %b) {
; SI-NEXT: .LBB23_3: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v4i8_to_i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v0, s17
; VI-NEXT: v_mov_b32_e32 v1, 0xc0c0004
@@ -2467,15 +2367,12 @@ define inreg i32 @bitcast_v4i8_to_i32_scalar(<4 x i8> inreg %a, i32 inreg %b) {
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x3000000, v0
; VI-NEXT: .LBB23_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: ; implicit-def: $vgpr0
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v4i8_to_i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v0, s17
; GFX9-NEXT: v_mov_b32_e32 v1, 0xc0c0004
@@ -2499,16 +2396,13 @@ define inreg i32 @bitcast_v4i8_to_i32_scalar(<4 x i8> inreg %a, i32 inreg %b) {
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB23_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v4i8_to_i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -2535,9 +2429,6 @@ define inreg i32 @bitcast_v4i8_to_i32_scalar(<4 x i8> inreg %a, i32 inreg %b) {
; GFX11-NEXT: v_or_b32_e32 v0, v1, v0
; GFX11-NEXT: .LBB23_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0
-; GFX11-NEXT: s_branch .LBB23_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2638,21 +2529,18 @@ define inreg <2 x i16> @bitcast_f32_to_v2i16_scalar(float inreg %a, i32 inreg %b
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB25_3
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s6, s16, 16
-; SI-NEXT: s_cbranch_execnz .LBB25_4
+; SI-NEXT: s_cbranch_execnz .LBB25_3
; SI-NEXT: .LBB25_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; SI-NEXT: s_branch .LBB25_5
+; SI-NEXT: s_branch .LBB25_4
; SI-NEXT: .LBB25_3:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB25_2
-; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s6
-; SI-NEXT: .LBB25_5: ; %end
+; SI-NEXT: .LBB25_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
@@ -2662,15 +2550,13 @@ define inreg <2 x i16> @bitcast_f32_to_v2i16_scalar(float inreg %a, i32 inreg %b
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
+; VI-NEXT: s_cbranch_execnz .LBB25_3
; VI-NEXT: .LBB25_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
-; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -2678,15 +2564,13 @@ define inreg <2 x i16> @bitcast_f32_to_v2i16_scalar(float inreg %a, i32 inreg %b
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
+; GFX9-NEXT: s_cbranch_execnz .LBB25_3
; GFX9-NEXT: .LBB25_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
-; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -2695,16 +2579,14 @@ define inreg <2 x i16> @bitcast_f32_to_v2i16_scalar(float inreg %a, i32 inreg %b
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_3
; GFX11-NEXT: .LBB25_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
-; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -2822,7 +2704,7 @@ define inreg float @bitcast_v2i16_to_f32_scalar(<2 x i16> inreg %a, i32 inreg %b
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB27_4
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s6, 16
@@ -2837,15 +2719,12 @@ define inreg float @bitcast_v2i16_to_f32_scalar(<2 x i16> inreg %a, i32 inreg %b
; SI-NEXT: .LBB27_3: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s7
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_4:
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB27_2
;
; VI-LABEL: bitcast_v2i16_to_f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB27_3
; VI-NEXT: .LBB27_2: ; %cmp.true
@@ -2857,22 +2736,18 @@ define inreg float @bitcast_v2i16_to_f32_scalar(<2 x i16> inreg %a, i32 inreg %b
; VI-NEXT: .LBB27_3: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v2i16_to_f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_4
+; GFX9-NEXT: s_cbranch_execnz .LBB27_3
; GFX9-NEXT: .LBB27_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB27_3:
-; GFX9-NEXT: s_branch .LBB27_2
-; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -2881,16 +2756,14 @@ define inreg float @bitcast_v2i16_to_f32_scalar(<2 x i16> inreg %a, i32 inreg %b
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_3
; GFX11-NEXT: .LBB27_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
-; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -2993,21 +2866,18 @@ define inreg <2 x half> @bitcast_f32_to_v2f16_scalar(float inreg %a, i32 inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s6, s16, 16
-; SI-NEXT: s_cbranch_execnz .LBB29_4
+; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; SI-NEXT: s_branch .LBB29_5
+; SI-NEXT: s_branch .LBB29_4
; SI-NEXT: .LBB29_3:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB29_2
-; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s6
-; SI-NEXT: .LBB29_5: ; %end
+; SI-NEXT: .LBB29_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
@@ -3017,15 +2887,13 @@ define inreg <2 x half> @bitcast_f32_to_v2f16_scalar(float inreg %a, i32 inreg %
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
+; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
-; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -3033,15 +2901,13 @@ define inreg <2 x half> @bitcast_f32_to_v2f16_scalar(float inreg %a, i32 inreg %
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
+; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
-; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -3050,16 +2916,14 @@ define inreg <2 x half> @bitcast_f32_to_v2f16_scalar(float inreg %a, i32 inreg %
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-NEXT: .LBB29_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
-; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -3177,12 +3041,12 @@ define inreg float @bitcast_v2f16_to_f32_scalar(<2 x half> inreg %a, i32 inreg %
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB31_3
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s6, 16
; SI-NEXT: s_or_b32 s7, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB31_4
+; SI-NEXT: s_cbranch_execnz .LBB31_3
; SI-NEXT: .LBB31_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s6
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -3194,9 +3058,6 @@ define inreg float @bitcast_v2f16_to_f32_scalar(<2 x half> inreg %a, i32 inreg %
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB31_3:
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB31_2
-; SI-NEXT: .LBB31_4:
; SI-NEXT: v_mov_b32_e32 v0, s7
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -3204,9 +3065,9 @@ define inreg float @bitcast_v2f16_to_f32_scalar(<2 x half> inreg %a, i32 inreg %
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_3
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB31_4
+; VI-NEXT: s_cbranch_execnz .LBB31_3
; VI-NEXT: .LBB31_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -3216,8 +3077,6 @@ define inreg float @bitcast_v2f16_to_f32_scalar(<2 x half> inreg %a, i32 inreg %
; VI-NEXT: v_or_b32_e32 v0, v0, v1
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB31_3:
-; VI-NEXT: s_branch .LBB31_2
-; VI-NEXT: .LBB31_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -3225,16 +3084,14 @@ define inreg float @bitcast_v2f16_to_f32_scalar(<2 x half> inreg %a, i32 inreg %
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
+; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
-; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -3243,16 +3100,14 @@ define inreg float @bitcast_v2f16_to_f32_scalar(<2 x half> inreg %a, i32 inreg %
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-NEXT: .LBB31_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
-; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -3360,24 +3215,20 @@ define inreg <2 x bfloat> @bitcast_f32_to_v2bf16_scalar(float inreg %a, i32 inre
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s16, 0xffff0000
; SI-NEXT: s_lshl_b32 s7, s16, 16
-; SI-NEXT: s_cbranch_execnz .LBB33_4
+; SI-NEXT: s_cbranch_execnz .LBB33_3
; SI-NEXT: .LBB33_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v0
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: s_branch .LBB33_5
+; SI-NEXT: s_branch .LBB33_4
; SI-NEXT: .LBB33_3:
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB33_2
-; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s7
; SI-NEXT: v_mov_b32_e32 v1, s6
-; SI-NEXT: .LBB33_5: ; %end
+; SI-NEXT: .LBB33_4: ; %end
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
@@ -3388,15 +3239,13 @@ define inreg <2 x bfloat> @bitcast_f32_to_v2bf16_scalar(float inreg %a, i32 inre
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_4
+; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB33_3:
-; VI-NEXT: s_branch .LBB33_2
-; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -3404,15 +3253,13 @@ define inreg <2 x bfloat> @bitcast_f32_to_v2bf16_scalar(float inreg %a, i32 inre
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
+; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: s_branch .LBB33_2
-; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -3421,16 +3268,14 @@ define inreg <2 x bfloat> @bitcast_f32_to_v2bf16_scalar(float inreg %a, i32 inre
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
-; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -3643,7 +3488,7 @@ define inreg float @bitcast_v2bf16_to_f32_scalar(<2 x bfloat> inreg %a, i32 inre
; SI-NEXT: s_cmp_lg_u32 s17, 0
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB35_4
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v4
; SI-NEXT: v_lshr_b64 v[0:1], v[2:3], 16
@@ -3657,17 +3502,14 @@ define inreg float @bitcast_v2bf16_to_f32_scalar(<2 x bfloat> inreg %a, i32 inre
; SI-NEXT: v_lshr_b64 v[0:1], v[0:1], 16
; SI-NEXT: .LBB35_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB35_4:
-; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: s_branch .LBB35_2
;
; VI-LABEL: bitcast_v2bf16_to_f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_4
+; VI-NEXT: s_cbranch_execnz .LBB35_3
; VI-NEXT: .LBB35_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, 0x40c00000
@@ -3690,8 +3532,6 @@ define inreg float @bitcast_v2bf16_to_f32_scalar(<2 x bfloat> inreg %a, i32 inre
; VI-NEXT: v_lshrrev_b64 v[0:1], 16, v[0:1]
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB35_3:
-; VI-NEXT: s_branch .LBB35_2
-; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -3699,9 +3539,9 @@ define inreg float @bitcast_v2bf16_to_f32_scalar(<2 x bfloat> inreg %a, i32 inre
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
+; GFX9-NEXT: s_cbranch_execnz .LBB35_3
; GFX9-NEXT: .LBB35_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s16, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -3726,8 +3566,6 @@ define inreg float @bitcast_v2bf16_to_f32_scalar(<2 x bfloat> inreg %a, i32 inre
; GFX9-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
-; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -3736,10 +3574,10 @@ define inreg float @bitcast_v2bf16_to_f32_scalar(<2 x bfloat> inreg %a, i32 inre
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -3767,8 +3605,6 @@ define inreg float @bitcast_v2bf16_to_f32_scalar(<2 x bfloat> inreg %a, i32 inre
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB35_2
-; GFX11-TRUE16-NEXT: .LBB35_4:
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -3777,10 +3613,10 @@ define inreg float @bitcast_v2bf16_to_f32_scalar(<2 x bfloat> inreg %a, i32 inre
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-FAKE16-NEXT: .LBB35_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
@@ -3808,8 +3644,6 @@ define inreg float @bitcast_v2bf16_to_f32_scalar(<2 x bfloat> inreg %a, i32 inre
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB35_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB35_2
-; GFX11-FAKE16-NEXT: .LBB35_4:
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -3904,15 +3738,13 @@ define inreg <1 x i32> @bitcast_f32_to_v1i32_scalar(float inreg %a, i32 inreg %b
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB37_3
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB37_4
+; SI-NEXT: s_cbranch_execnz .LBB37_3
; SI-NEXT: .LBB37_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB37_3:
-; SI-NEXT: s_branch .LBB37_2
-; SI-NEXT: .LBB37_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -3920,15 +3752,13 @@ define inreg <1 x i32> @bitcast_f32_to_v1i32_scalar(float inreg %a, i32 inreg %b
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB37_3
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_4
+; VI-NEXT: s_cbranch_execnz .LBB37_3
; VI-NEXT: .LBB37_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB37_3:
-; VI-NEXT: s_branch .LBB37_2
-; VI-NEXT: .LBB37_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -3936,15 +3766,13 @@ define inreg <1 x i32> @bitcast_f32_to_v1i32_scalar(float inreg %a, i32 inreg %b
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_4
+; GFX9-NEXT: s_cbranch_execnz .LBB37_3
; GFX9-NEXT: .LBB37_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB37_3:
-; GFX9-NEXT: s_branch .LBB37_2
-; GFX9-NEXT: .LBB37_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -3953,16 +3781,14 @@ define inreg <1 x i32> @bitcast_f32_to_v1i32_scalar(float inreg %a, i32 inreg %b
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
; GFX11-NEXT: .LBB37_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: s_branch .LBB37_2
-; GFX11-NEXT: .LBB37_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -4057,7 +3883,7 @@ define inreg float @bitcast_v1i32_to_f32_scalar(<1 x i32> inreg %a, i32 inreg %b
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB39_4
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB39_3
; SI-NEXT: .LBB39_2: ; %cmp.true
@@ -4065,14 +3891,12 @@ define inreg float @bitcast_v1i32_to_f32_scalar(<1 x i32> inreg %a, i32 inreg %b
; SI-NEXT: .LBB39_3: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_4:
-; SI-NEXT: s_branch .LBB39_2
;
; VI-LABEL: bitcast_v1i32_to_f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB39_3
; VI-NEXT: .LBB39_2: ; %cmp.true
@@ -4080,14 +3904,12 @@ define inreg float @bitcast_v1i32_to_f32_scalar(<1 x i32> inreg %a, i32 inreg %b
; VI-NEXT: .LBB39_3: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v1i32_to_f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB39_3
; GFX9-NEXT: .LBB39_2: ; %cmp.true
@@ -4095,8 +3917,6 @@ define inreg float @bitcast_v1i32_to_f32_scalar(<1 x i32> inreg %a, i32 inreg %b
; GFX9-NEXT: .LBB39_3: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB39_4:
-; GFX9-NEXT: s_branch .LBB39_2
;
; GFX11-LABEL: bitcast_v1i32_to_f32_scalar:
; GFX11: ; %bb.0:
@@ -4304,12 +4124,12 @@ define inreg <4 x i8> @bitcast_f32_to_v4i8_scalar(float inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB41_3
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s7, s16, 24
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_lshr_b32 s8, s16, 8
-; SI-NEXT: s_cbranch_execnz .LBB41_4
+; SI-NEXT: s_cbranch_execnz .LBB41_3
; SI-NEXT: .LBB41_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
@@ -4317,11 +4137,6 @@ define inreg <4 x i8> @bitcast_f32_to_v4i8_scalar(float inreg %a, i32 inreg %b)
; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB41_3:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB41_2
-; SI-NEXT: .LBB41_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s8
; SI-NEXT: v_mov_b32_e32 v3, s7
@@ -4332,12 +4147,12 @@ define inreg <4 x i8> @bitcast_f32_to_v4i8_scalar(float inreg %a, i32 inreg %b)
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB41_3
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s7, s16, 24
; VI-NEXT: s_lshr_b32 s6, s16, 16
; VI-NEXT: s_lshr_b32 s8, s16, 8
-; VI-NEXT: s_cbranch_execnz .LBB41_4
+; VI-NEXT: s_cbranch_execnz .LBB41_3
; VI-NEXT: .LBB41_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
@@ -4345,11 +4160,6 @@ define inreg <4 x i8> @bitcast_f32_to_v4i8_scalar(float inreg %a, i32 inreg %b)
; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB41_3:
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr7
-; VI-NEXT: s_branch .LBB41_2
-; VI-NEXT: .LBB41_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s8
; VI-NEXT: v_mov_b32_e32 v3, s7
@@ -4360,12 +4170,12 @@ define inreg <4 x i8> @bitcast_f32_to_v4i8_scalar(float inreg %a, i32 inreg %b)
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s7, s16, 24
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: s_lshr_b32 s8, s16, 8
-; GFX9-NEXT: s_cbranch_execnz .LBB41_4
+; GFX9-NEXT: s_cbranch_execnz .LBB41_3
; GFX9-NEXT: .LBB41_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v0
@@ -4373,11 +4183,6 @@ define inreg <4 x i8> @bitcast_f32_to_v4i8_scalar(float inreg %a, i32 inreg %b)
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB41_3:
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr7
-; GFX9-NEXT: s_branch .LBB41_2
-; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: v_mov_b32_e32 v3, s7
@@ -4389,13 +4194,13 @@ define inreg <4 x i8> @bitcast_f32_to_v4i8_scalar(float inreg %a, i32 inreg %b)
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s3, s0, 24
; GFX11-NEXT: s_lshr_b32 s2, s0, 16
; GFX11-NEXT: s_lshr_b32 s4, s0, 8
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
; GFX11-NEXT: .LBB41_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -4404,11 +4209,6 @@ define inreg <4 x i8> @bitcast_f32_to_v4i8_scalar(float inreg %a, i32 inreg %b)
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB41_3:
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr2
-; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: s_branch .LBB41_2
-; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s4
; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -4669,7 +4469,7 @@ define inreg float @bitcast_v4i8_to_f32_scalar(<4 x i8> inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB43_4
+; SI-NEXT: s_cbranch_scc0 .LBB43_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -4698,15 +4498,12 @@ define inreg float @bitcast_v4i8_to_f32_scalar(<4 x i8> inreg %a, i32 inreg %b)
; SI-NEXT: .LBB43_3: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB43_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB43_2
;
; VI-LABEL: bitcast_v4i8_to_f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB43_4
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v0, s17
; VI-NEXT: v_mov_b32_e32 v1, 0xc0c0004
@@ -4730,15 +4527,12 @@ define inreg float @bitcast_v4i8_to_f32_scalar(<4 x i8> inreg %a, i32 inreg %b)
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x3000000, v0
; VI-NEXT: .LBB43_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_4:
-; VI-NEXT: ; implicit-def: $vgpr0
-; VI-NEXT: s_branch .LBB43_2
;
; GFX9-LABEL: bitcast_v4i8_to_f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v0, s17
; GFX9-NEXT: v_mov_b32_e32 v1, 0xc0c0004
@@ -4762,16 +4556,13 @@ define inreg float @bitcast_v4i8_to_f32_scalar(<4 x i8> inreg %a, i32 inreg %b)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB43_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB43_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0
-; GFX9-NEXT: s_branch .LBB43_2
;
; GFX11-LABEL: bitcast_v4i8_to_f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -4798,9 +4589,6 @@ define inreg float @bitcast_v4i8_to_f32_scalar(<4 x i8> inreg %a, i32 inreg %b)
; GFX11-NEXT: v_or_b32_e32 v0, v1, v0
; GFX11-NEXT: .LBB43_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB43_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0
-; GFX11-NEXT: s_branch .LBB43_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4913,7 +4701,7 @@ define inreg <2 x half> @bitcast_v2i16_to_v2f16_scalar(<2 x i16> inreg %a, i32 i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB45_4
+; SI-NEXT: s_cbranch_scc0 .LBB45_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s6, 16
@@ -4932,15 +4720,12 @@ define inreg <2 x half> @bitcast_v2i16_to_v2f16_scalar(<2 x i16> inreg %a, i32 i
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB45_4:
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB45_2
;
; VI-LABEL: bitcast_v2i16_to_v2f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB45_4
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB45_3
; VI-NEXT: .LBB45_2: ; %cmp.true
@@ -4952,22 +4737,18 @@ define inreg <2 x half> @bitcast_v2i16_to_v2f16_scalar(<2 x i16> inreg %a, i32 i
; VI-NEXT: .LBB45_3: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_4:
-; VI-NEXT: s_branch .LBB45_2
;
; GFX9-LABEL: bitcast_v2i16_to_v2f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB45_4
+; GFX9-NEXT: s_cbranch_execnz .LBB45_3
; GFX9-NEXT: .LBB45_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB45_3:
-; GFX9-NEXT: s_branch .LBB45_2
-; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -4976,16 +4757,14 @@ define inreg <2 x half> @bitcast_v2i16_to_v2f16_scalar(<2 x i16> inreg %a, i32 i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
; GFX11-NEXT: .LBB45_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB45_3:
-; GFX11-NEXT: s_branch .LBB45_2
-; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -5097,9 +4876,9 @@ define inreg <2 x i16> @bitcast_v2f16_to_v2i16_scalar(<2 x half> inreg %a, i32 i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB47_3
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB47_4
+; SI-NEXT: s_cbranch_execnz .LBB47_3
; SI-NEXT: .LBB47_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s6
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -5109,13 +4888,11 @@ define inreg <2 x i16> @bitcast_v2f16_to_v2i16_scalar(<2 x half> inreg %a, i32 i
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; SI-NEXT: v_or_b32_e32 v1, v1, v2
-; SI-NEXT: s_branch .LBB47_5
+; SI-NEXT: s_branch .LBB47_4
; SI-NEXT: .LBB47_3:
-; SI-NEXT: s_branch .LBB47_2
-; SI-NEXT: .LBB47_4:
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_mov_b32_e32 v1, s16
-; SI-NEXT: .LBB47_5: ; %end
+; SI-NEXT: .LBB47_4: ; %end
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
@@ -5125,9 +4902,9 @@ define inreg <2 x i16> @bitcast_v2f16_to_v2i16_scalar(<2 x half> inreg %a, i32 i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
+; VI-NEXT: s_cbranch_execnz .LBB47_3
; VI-NEXT: .LBB47_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -5137,8 +4914,6 @@ define inreg <2 x i16> @bitcast_v2f16_to_v2i16_scalar(<2 x half> inreg %a, i32 i
; VI-NEXT: v_or_b32_e32 v0, v1, v0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
-; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -5146,16 +4921,14 @@ define inreg <2 x i16> @bitcast_v2f16_to_v2i16_scalar(<2 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
+; GFX9-NEXT: s_cbranch_execnz .LBB47_3
; GFX9-NEXT: .LBB47_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
-; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -5164,16 +4937,14 @@ define inreg <2 x i16> @bitcast_v2f16_to_v2i16_scalar(<2 x half> inreg %a, i32 i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-NEXT: .LBB47_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -5280,7 +5051,7 @@ define inreg <2 x bfloat> @bitcast_v2i16_to_v2bf16_scalar(<2 x i16> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s7, s16, 16
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB49_4
+; SI-NEXT: s_cbranch_scc0 .LBB49_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s6, s16, 16
; SI-NEXT: s_lshl_b32 s8, s7, 16
@@ -5296,16 +5067,12 @@ define inreg <2 x bfloat> @bitcast_v2i16_to_v2bf16_scalar(<2 x i16> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v0, 1.0, s6
; SI-NEXT: v_lshr_b64 v[0:1], v[0:1], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB49_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB49_2
;
; VI-LABEL: bitcast_v2i16_to_v2bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB49_4
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB49_3
; VI-NEXT: .LBB49_2: ; %cmp.true
@@ -5317,22 +5084,18 @@ define inreg <2 x bfloat> @bitcast_v2i16_to_v2bf16_scalar(<2 x i16> inreg %a, i3
; VI-NEXT: .LBB49_3: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB49_4:
-; VI-NEXT: s_branch .LBB49_2
;
; GFX9-LABEL: bitcast_v2i16_to_v2bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
+; GFX9-NEXT: s_cbranch_execnz .LBB49_3
; GFX9-NEXT: .LBB49_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: s_branch .LBB49_2
-; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -5341,16 +5104,14 @@ define inreg <2 x bfloat> @bitcast_v2i16_to_v2bf16_scalar(<2 x i16> inreg %a, i3
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: s_branch .LBB49_2
-; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -5554,7 +5315,7 @@ define inreg <2 x i16> @bitcast_v2bf16_to_v2i16_scalar(<2 x bfloat> inreg %a, i3
; SI-NEXT: s_cmp_lg_u32 s17, 0
; SI-NEXT: v_mul_f32_e64 v0, 1.0, s5
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s4
-; SI-NEXT: s_cbranch_scc0 .LBB51_4
+; SI-NEXT: s_cbranch_scc0 .LBB51_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v3
@@ -5571,18 +5332,14 @@ define inreg <2 x i16> @bitcast_v2bf16_to_v2i16_scalar(<2 x bfloat> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB51_4:
-; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB51_2
;
; VI-LABEL: bitcast_v2bf16_to_v2i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB51_3
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB51_4
+; VI-NEXT: s_cbranch_execnz .LBB51_3
; VI-NEXT: .LBB51_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, 0x40c00000
@@ -5605,8 +5362,6 @@ define inreg <2 x i16> @bitcast_v2bf16_to_v2i16_scalar(<2 x bfloat> inreg %a, i3
; VI-NEXT: v_lshrrev_b64 v[0:1], 16, v[0:1]
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB51_3:
-; VI-NEXT: s_branch .LBB51_2
-; VI-NEXT: .LBB51_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -5614,9 +5369,9 @@ define inreg <2 x i16> @bitcast_v2bf16_to_v2i16_scalar(<2 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_4
+; GFX9-NEXT: s_cbranch_execnz .LBB51_3
; GFX9-NEXT: .LBB51_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s16, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -5640,8 +5395,6 @@ define inreg <2 x i16> @bitcast_v2bf16_to_v2i16_scalar(<2 x bfloat> inreg %a, i3
; GFX9-NEXT: v_and_or_b32 v0, v1, v2, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB51_3:
-; GFX9-NEXT: s_branch .LBB51_2
-; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -5650,10 +5403,10 @@ define inreg <2 x i16> @bitcast_v2bf16_to_v2i16_scalar(<2 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s0, 16
; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xffff0000
@@ -5677,8 +5430,6 @@ define inreg <2 x i16> @bitcast_v2bf16_to_v2i16_scalar(<2 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.h
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB51_2
-; GFX11-TRUE16-NEXT: .LBB51_4:
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -5687,10 +5438,10 @@ define inreg <2 x i16> @bitcast_v2bf16_to_v2i16_scalar(<2 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-FAKE16-NEXT: .LBB51_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
@@ -5716,8 +5467,6 @@ define inreg <2 x i16> @bitcast_v2bf16_to_v2i16_scalar(<2 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v1, v0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB51_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB51_2
-; GFX11-FAKE16-NEXT: .LBB51_4:
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -5835,7 +5584,7 @@ define inreg <1 x i32> @bitcast_v2i16_to_v1i32_scalar(<2 x i16> inreg %a, i32 in
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB53_4
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s6, 16
@@ -5850,15 +5599,12 @@ define inreg <1 x i32> @bitcast_v2i16_to_v1i32_scalar(<2 x i16> inreg %a, i32 in
; SI-NEXT: .LBB53_3: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s7
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB53_4:
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB53_2
;
; VI-LABEL: bitcast_v2i16_to_v1i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB53_4
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB53_3
; VI-NEXT: .LBB53_2: ; %cmp.true
@@ -5870,22 +5616,18 @@ define inreg <1 x i32> @bitcast_v2i16_to_v1i32_scalar(<2 x i16> inreg %a, i32 in
; VI-NEXT: .LBB53_3: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_4:
-; VI-NEXT: s_branch .LBB53_2
;
; GFX9-LABEL: bitcast_v2i16_to_v1i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB53_4
+; GFX9-NEXT: s_cbranch_execnz .LBB53_3
; GFX9-NEXT: .LBB53_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB53_3:
-; GFX9-NEXT: s_branch .LBB53_2
-; GFX9-NEXT: .LBB53_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -5894,16 +5636,14 @@ define inreg <1 x i32> @bitcast_v2i16_to_v1i32_scalar(<2 x i16> inreg %a, i32 in
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-NEXT: .LBB53_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB53_3:
-; GFX11-NEXT: s_branch .LBB53_2
-; GFX11-NEXT: .LBB53_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -6006,7 +5746,7 @@ define inreg <2 x i16> @bitcast_v1i32_to_v2i16_scalar(<1 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB55_4
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cbranch_execnz .LBB55_3
@@ -6019,15 +5759,12 @@ define inreg <2 x i16> @bitcast_v1i32_to_v2i16_scalar(<1 x i32> inreg %a, i32 in
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB55_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB55_2
;
; VI-LABEL: bitcast_v1i32_to_v2i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_4
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB55_3
; VI-NEXT: .LBB55_2: ; %cmp.true
@@ -6035,14 +5772,12 @@ define inreg <2 x i16> @bitcast_v1i32_to_v2i16_scalar(<1 x i32> inreg %a, i32 in
; VI-NEXT: .LBB55_3: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB55_4:
-; VI-NEXT: s_branch .LBB55_2
;
; GFX9-LABEL: bitcast_v1i32_to_v2i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB55_3
; GFX9-NEXT: .LBB55_2: ; %cmp.true
@@ -6050,8 +5785,6 @@ define inreg <2 x i16> @bitcast_v1i32_to_v2i16_scalar(<1 x i32> inreg %a, i32 in
; GFX9-NEXT: .LBB55_3: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB55_4:
-; GFX9-NEXT: s_branch .LBB55_2
;
; GFX11-LABEL: bitcast_v1i32_to_v2i16_scalar:
; GFX11: ; %bb.0:
@@ -6275,7 +6008,7 @@ define inreg <4 x i8> @bitcast_v2i16_to_v4i8_scalar(<2 x i16> inreg %a, i32 inre
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB57_4
+; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s6, 16
@@ -6298,17 +6031,12 @@ define inreg <4 x i8> @bitcast_v2i16_to_v4i8_scalar(<2 x i16> inreg %a, i32 inre
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v3, s9
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB57_4:
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB57_2
;
; VI-LABEL: bitcast_v2i16_to_v4i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s8, s16, 24
; VI-NEXT: s_lshr_b32 s6, s16, 16
@@ -6330,23 +6058,17 @@ define inreg <4 x i8> @bitcast_v2i16_to_v4i8_scalar(<2 x i16> inreg %a, i32 inre
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_mov_b32_e32 v3, s8
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: ; implicit-def: $sgpr7
-; VI-NEXT: ; implicit-def: $sgpr9
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v2i16_to_v4i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s7, s16, 24
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: s_lshr_b32 s8, s16, 8
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
+; GFX9-NEXT: s_cbranch_execnz .LBB57_3
; GFX9-NEXT: .LBB57_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v0
@@ -6354,11 +6076,6 @@ define inreg <4 x i8> @bitcast_v2i16_to_v4i8_scalar(<2 x i16> inreg %a, i32 inre
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr7
-; GFX9-NEXT: s_branch .LBB57_2
-; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: v_mov_b32_e32 v3, s7
@@ -6370,13 +6087,13 @@ define inreg <4 x i8> @bitcast_v2i16_to_v4i8_scalar(<2 x i16> inreg %a, i32 inre
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s3, s0, 24
; GFX11-NEXT: s_lshr_b32 s2, s0, 16
; GFX11-NEXT: s_lshr_b32 s4, s0, 8
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-NEXT: .LBB57_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -6385,11 +6102,6 @@ define inreg <4 x i8> @bitcast_v2i16_to_v4i8_scalar(<2 x i16> inreg %a, i32 inre
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB57_3:
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr2
-; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: s_branch .LBB57_2
-; GFX11-NEXT: .LBB57_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s4
; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -6650,7 +6362,7 @@ define inreg <2 x i16> @bitcast_v4i8_to_v2i16_scalar(<4 x i8> inreg %a, i32 inre
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB59_4
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -6684,16 +6396,12 @@ define inreg <2 x i16> @bitcast_v4i8_to_v2i16_scalar(<4 x i8> inreg %a, i32 inre
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB59_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB59_2
;
; VI-LABEL: bitcast_v4i8_to_v2i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_4
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v0, s17
; VI-NEXT: v_mov_b32_e32 v1, 0xc0c0004
@@ -6717,15 +6425,12 @@ define inreg <2 x i16> @bitcast_v4i8_to_v2i16_scalar(<4 x i8> inreg %a, i32 inre
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x3000000, v0
; VI-NEXT: .LBB59_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB59_4:
-; VI-NEXT: ; implicit-def: $vgpr0
-; VI-NEXT: s_branch .LBB59_2
;
; GFX9-LABEL: bitcast_v4i8_to_v2i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v0, s17
; GFX9-NEXT: v_mov_b32_e32 v1, 0xc0c0004
@@ -6749,16 +6454,13 @@ define inreg <2 x i16> @bitcast_v4i8_to_v2i16_scalar(<4 x i8> inreg %a, i32 inre
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB59_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB59_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0
-; GFX9-NEXT: s_branch .LBB59_2
;
; GFX11-LABEL: bitcast_v4i8_to_v2i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -6785,9 +6487,6 @@ define inreg <2 x i16> @bitcast_v4i8_to_v2i16_scalar(<4 x i8> inreg %a, i32 inre
; GFX11-NEXT: v_or_b32_e32 v0, v1, v0
; GFX11-NEXT: .LBB59_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB59_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0
-; GFX11-NEXT: s_branch .LBB59_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6906,11 +6605,11 @@ define inreg <2 x bfloat> @bitcast_v2f16_to_v2bf16_scalar(<2 x half> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB61_3
+; SI-NEXT: s_cbranch_scc0 .LBB61_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s7, s16, 16
; SI-NEXT: s_lshl_b32 s8, s6, 16
-; SI-NEXT: s_cbranch_execnz .LBB61_4
+; SI-NEXT: s_cbranch_execnz .LBB61_3
; SI-NEXT: .LBB61_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s6
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -6920,15 +6619,11 @@ define inreg <2 x bfloat> @bitcast_v2f16_to_v2bf16_scalar(<2 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v2, v0
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; SI-NEXT: s_branch .LBB61_5
+; SI-NEXT: s_branch .LBB61_4
; SI-NEXT: .LBB61_3:
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB61_2
-; SI-NEXT: .LBB61_4:
; SI-NEXT: v_mov_b32_e32 v1, s8
; SI-NEXT: v_mov_b32_e32 v0, s7
-; SI-NEXT: .LBB61_5: ; %end
+; SI-NEXT: .LBB61_4: ; %end
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
@@ -6939,9 +6634,9 @@ define inreg <2 x bfloat> @bitcast_v2f16_to_v2bf16_scalar(<2 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB61_3
+; VI-NEXT: s_cbranch_scc0 .LBB61_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB61_4
+; VI-NEXT: s_cbranch_execnz .LBB61_3
; VI-NEXT: .LBB61_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -6951,8 +6646,6 @@ define inreg <2 x bfloat> @bitcast_v2f16_to_v2bf16_scalar(<2 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v1, v0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB61_3:
-; VI-NEXT: s_branch .LBB61_2
-; VI-NEXT: .LBB61_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -6960,16 +6653,14 @@ define inreg <2 x bfloat> @bitcast_v2f16_to_v2bf16_scalar(<2 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB61_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB61_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB61_4
+; GFX9-NEXT: s_cbranch_execnz .LBB61_3
; GFX9-NEXT: .LBB61_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB61_3:
-; GFX9-NEXT: s_branch .LBB61_2
-; GFX9-NEXT: .LBB61_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -6978,16 +6669,14 @@ define inreg <2 x bfloat> @bitcast_v2f16_to_v2bf16_scalar(<2 x half> inreg %a, i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB61_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB61_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB61_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB61_3
; GFX11-NEXT: .LBB61_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB61_3:
-; GFX11-NEXT: s_branch .LBB61_2
-; GFX11-NEXT: .LBB61_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -7198,7 +6887,7 @@ define inreg <2 x half> @bitcast_v2bf16_to_v2f16_scalar(<2 x bfloat> inreg %a, i
; SI-NEXT: s_cmp_lg_u32 s17, 0
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v0, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB63_4
+; SI-NEXT: s_cbranch_scc0 .LBB63_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v4
; SI-NEXT: v_lshr_b64 v[2:3], v[0:1], 16
@@ -7215,17 +6904,14 @@ define inreg <2 x half> @bitcast_v2bf16_to_v2f16_scalar(<2 x bfloat> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB63_4:
-; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: s_branch .LBB63_2
;
; VI-LABEL: bitcast_v2bf16_to_v2f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB63_3
+; VI-NEXT: s_cbranch_scc0 .LBB63_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB63_4
+; VI-NEXT: s_cbranch_execnz .LBB63_3
; VI-NEXT: .LBB63_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, 0x40c00000
@@ -7248,8 +6934,6 @@ define inreg <2 x half> @bitcast_v2bf16_to_v2f16_scalar(<2 x bfloat> inreg %a, i
; VI-NEXT: v_lshrrev_b64 v[0:1], 16, v[0:1]
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB63_3:
-; VI-NEXT: s_branch .LBB63_2
-; VI-NEXT: .LBB63_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -7257,9 +6941,9 @@ define inreg <2 x half> @bitcast_v2bf16_to_v2f16_scalar(<2 x bfloat> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB63_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB63_4
+; GFX9-NEXT: s_cbranch_execnz .LBB63_3
; GFX9-NEXT: .LBB63_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s16, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -7284,8 +6968,6 @@ define inreg <2 x half> @bitcast_v2bf16_to_v2f16_scalar(<2 x bfloat> inreg %a, i
; GFX9-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB63_3:
-; GFX9-NEXT: s_branch .LBB63_2
-; GFX9-NEXT: .LBB63_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -7294,10 +6976,10 @@ define inreg <2 x half> @bitcast_v2bf16_to_v2f16_scalar(<2 x bfloat> inreg %a, i
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB63_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB63_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB63_3
; GFX11-TRUE16-NEXT: .LBB63_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -7325,8 +7007,6 @@ define inreg <2 x half> @bitcast_v2bf16_to_v2f16_scalar(<2 x bfloat> inreg %a, i
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB63_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB63_2
-; GFX11-TRUE16-NEXT: .LBB63_4:
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -7335,10 +7015,10 @@ define inreg <2 x half> @bitcast_v2bf16_to_v2f16_scalar(<2 x bfloat> inreg %a, i
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB63_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB63_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB63_3
; GFX11-FAKE16-NEXT: .LBB63_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
@@ -7366,8 +7046,6 @@ define inreg <2 x half> @bitcast_v2bf16_to_v2f16_scalar(<2 x bfloat> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB63_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB63_2
-; GFX11-FAKE16-NEXT: .LBB63_4:
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -7485,12 +7163,12 @@ define inreg <1 x i32> @bitcast_v2f16_to_v1i32_scalar(<2 x half> inreg %a, i32 i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB65_3
+; SI-NEXT: s_cbranch_scc0 .LBB65_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s6, 16
; SI-NEXT: s_or_b32 s7, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB65_4
+; SI-NEXT: s_cbranch_execnz .LBB65_3
; SI-NEXT: .LBB65_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s6
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -7502,9 +7180,6 @@ define inreg <1 x i32> @bitcast_v2f16_to_v1i32_scalar(<2 x half> inreg %a, i32 i
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB65_3:
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB65_2
-; SI-NEXT: .LBB65_4:
; SI-NEXT: v_mov_b32_e32 v0, s7
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -7512,9 +7187,9 @@ define inreg <1 x i32> @bitcast_v2f16_to_v1i32_scalar(<2 x half> inreg %a, i32 i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB65_3
+; VI-NEXT: s_cbranch_scc0 .LBB65_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB65_4
+; VI-NEXT: s_cbranch_execnz .LBB65_3
; VI-NEXT: .LBB65_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -7524,8 +7199,6 @@ define inreg <1 x i32> @bitcast_v2f16_to_v1i32_scalar(<2 x half> inreg %a, i32 i
; VI-NEXT: v_or_b32_e32 v0, v0, v1
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB65_3:
-; VI-NEXT: s_branch .LBB65_2
-; VI-NEXT: .LBB65_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -7533,16 +7206,14 @@ define inreg <1 x i32> @bitcast_v2f16_to_v1i32_scalar(<2 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB65_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB65_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB65_4
+; GFX9-NEXT: s_cbranch_execnz .LBB65_3
; GFX9-NEXT: .LBB65_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB65_3:
-; GFX9-NEXT: s_branch .LBB65_2
-; GFX9-NEXT: .LBB65_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -7551,16 +7222,14 @@ define inreg <1 x i32> @bitcast_v2f16_to_v1i32_scalar(<2 x half> inreg %a, i32 i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB65_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB65_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB65_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB65_3
; GFX11-NEXT: .LBB65_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB65_3:
-; GFX11-NEXT: s_branch .LBB65_2
-; GFX11-NEXT: .LBB65_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -7663,7 +7332,7 @@ define inreg <2 x half> @bitcast_v1i32_to_v2f16_scalar(<1 x i32> inreg %a, i32 i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB67_4
+; SI-NEXT: s_cbranch_scc0 .LBB67_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cbranch_execnz .LBB67_3
@@ -7676,15 +7345,12 @@ define inreg <2 x half> @bitcast_v1i32_to_v2f16_scalar(<1 x i32> inreg %a, i32 i
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB67_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB67_2
;
; VI-LABEL: bitcast_v1i32_to_v2f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB67_4
+; VI-NEXT: s_cbranch_scc0 .LBB67_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB67_3
; VI-NEXT: .LBB67_2: ; %cmp.true
@@ -7692,14 +7358,12 @@ define inreg <2 x half> @bitcast_v1i32_to_v2f16_scalar(<1 x i32> inreg %a, i32 i
; VI-NEXT: .LBB67_3: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB67_4:
-; VI-NEXT: s_branch .LBB67_2
;
; GFX9-LABEL: bitcast_v1i32_to_v2f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB67_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB67_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB67_3
; GFX9-NEXT: .LBB67_2: ; %cmp.true
@@ -7707,8 +7371,6 @@ define inreg <2 x half> @bitcast_v1i32_to_v2f16_scalar(<1 x i32> inreg %a, i32 i
; GFX9-NEXT: .LBB67_3: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB67_4:
-; GFX9-NEXT: s_branch .LBB67_2
;
; GFX11-LABEL: bitcast_v1i32_to_v2f16_scalar:
; GFX11: ; %bb.0:
@@ -7931,14 +7593,14 @@ define inreg <4 x i8> @bitcast_v2f16_to_v4i8_scalar(<2 x half> inreg %a, i32 inr
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB69_3
+; SI-NEXT: s_cbranch_scc0 .LBB69_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s6, 16
; SI-NEXT: s_or_b32 s7, s4, s5
; SI-NEXT: s_lshr_b32 s8, s7, 8
; SI-NEXT: s_bfe_u32 s9, s6, 0x80008
-; SI-NEXT: s_cbranch_execnz .LBB69_4
+; SI-NEXT: s_cbranch_execnz .LBB69_3
; SI-NEXT: .LBB69_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s6
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -7952,11 +7614,6 @@ define inreg <4 x i8> @bitcast_v2f16_to_v4i8_scalar(<2 x half> inreg %a, i32 inr
; SI-NEXT: v_bfe_u32 v3, v2, 8, 8
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB69_3:
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB69_2
-; SI-NEXT: .LBB69_4:
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v3, s9
; SI-NEXT: v_mov_b32_e32 v0, s7
@@ -7967,12 +7624,12 @@ define inreg <4 x i8> @bitcast_v2f16_to_v4i8_scalar(<2 x half> inreg %a, i32 inr
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB69_3
+; VI-NEXT: s_cbranch_scc0 .LBB69_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s6, s16, 24
; VI-NEXT: s_lshr_b32 s8, s16, 16
; VI-NEXT: s_lshr_b32 s7, s16, 8
-; VI-NEXT: s_cbranch_execnz .LBB69_4
+; VI-NEXT: s_cbranch_execnz .LBB69_3
; VI-NEXT: .LBB69_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -7984,11 +7641,6 @@ define inreg <4 x i8> @bitcast_v2f16_to_v4i8_scalar(<2 x half> inreg %a, i32 inr
; VI-NEXT: v_bfe_u32 v3, v2, 8, 8
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB69_3:
-; VI-NEXT: ; implicit-def: $sgpr7
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB69_2
-; VI-NEXT: .LBB69_4:
; VI-NEXT: v_mov_b32_e32 v2, s8
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v3, s6
@@ -7999,12 +7651,12 @@ define inreg <4 x i8> @bitcast_v2f16_to_v4i8_scalar(<2 x half> inreg %a, i32 inr
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB69_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB69_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s7, s16, 24
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: s_lshr_b32 s8, s16, 8
-; GFX9-NEXT: s_cbranch_execnz .LBB69_4
+; GFX9-NEXT: s_cbranch_execnz .LBB69_3
; GFX9-NEXT: .LBB69_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
@@ -8013,11 +7665,6 @@ define inreg <4 x i8> @bitcast_v2f16_to_v4i8_scalar(<2 x half> inreg %a, i32 inr
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB69_3:
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr7
-; GFX9-NEXT: s_branch .LBB69_2
-; GFX9-NEXT: .LBB69_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: v_mov_b32_e32 v3, s7
@@ -8029,13 +7676,13 @@ define inreg <4 x i8> @bitcast_v2f16_to_v4i8_scalar(<2 x half> inreg %a, i32 inr
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB69_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB69_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s3, s0, 24
; GFX11-NEXT: s_lshr_b32 s2, s0, 16
; GFX11-NEXT: s_lshr_b32 s4, s0, 8
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB69_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB69_3
; GFX11-NEXT: .LBB69_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -8044,11 +7691,6 @@ define inreg <4 x i8> @bitcast_v2f16_to_v4i8_scalar(<2 x half> inreg %a, i32 inr
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB69_3:
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr2
-; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: s_branch .LBB69_2
-; GFX11-NEXT: .LBB69_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s4
; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -8309,7 +7951,7 @@ define inreg <2 x half> @bitcast_v4i8_to_v2f16_scalar(<4 x i8> inreg %a, i32 inr
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB71_4
+; SI-NEXT: s_cbranch_scc0 .LBB71_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -8343,16 +7985,12 @@ define inreg <2 x half> @bitcast_v4i8_to_v2f16_scalar(<4 x i8> inreg %a, i32 inr
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB71_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB71_2
;
; VI-LABEL: bitcast_v4i8_to_v2f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB71_4
+; VI-NEXT: s_cbranch_scc0 .LBB71_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v0, s17
; VI-NEXT: v_mov_b32_e32 v1, 0xc0c0004
@@ -8376,15 +8014,12 @@ define inreg <2 x half> @bitcast_v4i8_to_v2f16_scalar(<4 x i8> inreg %a, i32 inr
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x3000000, v0
; VI-NEXT: .LBB71_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB71_4:
-; VI-NEXT: ; implicit-def: $vgpr0
-; VI-NEXT: s_branch .LBB71_2
;
; GFX9-LABEL: bitcast_v4i8_to_v2f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB71_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB71_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v0, s17
; GFX9-NEXT: v_mov_b32_e32 v1, 0xc0c0004
@@ -8408,16 +8043,13 @@ define inreg <2 x half> @bitcast_v4i8_to_v2f16_scalar(<4 x i8> inreg %a, i32 inr
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB71_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB71_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0
-; GFX9-NEXT: s_branch .LBB71_2
;
; GFX11-LABEL: bitcast_v4i8_to_v2f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB71_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB71_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -8444,9 +8076,6 @@ define inreg <2 x half> @bitcast_v4i8_to_v2f16_scalar(<4 x i8> inreg %a, i32 inr
; GFX11-NEXT: v_or_b32_e32 v0, v1, v0
; GFX11-NEXT: .LBB71_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB71_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0
-; GFX11-NEXT: s_branch .LBB71_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8657,7 +8286,7 @@ define inreg <1 x i32> @bitcast_v2bf16_to_v1i32_scalar(<2 x bfloat> inreg %a, i3
; SI-NEXT: s_cmp_lg_u32 s17, 0
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB73_4
+; SI-NEXT: s_cbranch_scc0 .LBB73_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v4
; SI-NEXT: v_lshr_b64 v[0:1], v[2:3], 16
@@ -8671,17 +8300,14 @@ define inreg <1 x i32> @bitcast_v2bf16_to_v1i32_scalar(<2 x bfloat> inreg %a, i3
; SI-NEXT: v_lshr_b64 v[0:1], v[0:1], 16
; SI-NEXT: .LBB73_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB73_4:
-; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: s_branch .LBB73_2
;
; VI-LABEL: bitcast_v2bf16_to_v1i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB73_3
+; VI-NEXT: s_cbranch_scc0 .LBB73_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB73_4
+; VI-NEXT: s_cbranch_execnz .LBB73_3
; VI-NEXT: .LBB73_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, 0x40c00000
@@ -8704,8 +8330,6 @@ define inreg <1 x i32> @bitcast_v2bf16_to_v1i32_scalar(<2 x bfloat> inreg %a, i3
; VI-NEXT: v_lshrrev_b64 v[0:1], 16, v[0:1]
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB73_3:
-; VI-NEXT: s_branch .LBB73_2
-; VI-NEXT: .LBB73_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
;
@@ -8713,9 +8337,9 @@ define inreg <1 x i32> @bitcast_v2bf16_to_v1i32_scalar(<2 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB73_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB73_4
+; GFX9-NEXT: s_cbranch_execnz .LBB73_3
; GFX9-NEXT: .LBB73_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s16, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -8740,8 +8364,6 @@ define inreg <1 x i32> @bitcast_v2bf16_to_v1i32_scalar(<2 x bfloat> inreg %a, i3
; GFX9-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB73_3:
-; GFX9-NEXT: s_branch .LBB73_2
-; GFX9-NEXT: .LBB73_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -8750,10 +8372,10 @@ define inreg <1 x i32> @bitcast_v2bf16_to_v1i32_scalar(<2 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB73_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB73_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB73_3
; GFX11-TRUE16-NEXT: .LBB73_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -8781,8 +8403,6 @@ define inreg <1 x i32> @bitcast_v2bf16_to_v1i32_scalar(<2 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB73_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB73_2
-; GFX11-TRUE16-NEXT: .LBB73_4:
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -8791,10 +8411,10 @@ define inreg <1 x i32> @bitcast_v2bf16_to_v1i32_scalar(<2 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB73_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB73_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB73_3
; GFX11-FAKE16-NEXT: .LBB73_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
@@ -8822,8 +8442,6 @@ define inreg <1 x i32> @bitcast_v2bf16_to_v1i32_scalar(<2 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB73_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB73_2
-; GFX11-FAKE16-NEXT: .LBB73_4:
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -8931,7 +8549,7 @@ define inreg <2 x bfloat> @bitcast_v1i32_to_v2bf16_scalar(<1 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB75_4
+; SI-NEXT: s_cbranch_scc0 .LBB75_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s7, s16, 0xffff0000
; SI-NEXT: s_lshl_b32 s6, s16, 16
@@ -8946,16 +8564,12 @@ define inreg <2 x bfloat> @bitcast_v1i32_to_v2bf16_scalar(<1 x i32> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v0, 1.0, s6
; SI-NEXT: v_lshr_b64 v[0:1], v[0:1], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB75_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB75_2
;
; VI-LABEL: bitcast_v1i32_to_v2bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB75_4
+; VI-NEXT: s_cbranch_scc0 .LBB75_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB75_3
; VI-NEXT: .LBB75_2: ; %cmp.true
@@ -8963,14 +8577,12 @@ define inreg <2 x bfloat> @bitcast_v1i32_to_v2bf16_scalar(<1 x i32> inreg %a, i3
; VI-NEXT: .LBB75_3: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB75_4:
-; VI-NEXT: s_branch .LBB75_2
;
; GFX9-LABEL: bitcast_v1i32_to_v2bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB75_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB75_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB75_3
; GFX9-NEXT: .LBB75_2: ; %cmp.true
@@ -8978,8 +8590,6 @@ define inreg <2 x bfloat> @bitcast_v1i32_to_v2bf16_scalar(<1 x i32> inreg %a, i3
; GFX9-NEXT: .LBB75_3: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB75_4:
-; GFX9-NEXT: s_branch .LBB75_2
;
; GFX11-LABEL: bitcast_v1i32_to_v2bf16_scalar:
; GFX11: ; %bb.0:
@@ -9276,7 +8886,7 @@ define inreg <4 x i8> @bitcast_v2bf16_to_v4i8_scalar(<2 x bfloat> inreg %a, i32
; SI-NEXT: s_cmp_lg_u32 s17, 0
; SI-NEXT: v_mul_f32_e64 v0, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB77_4
+; SI-NEXT: s_cbranch_scc0 .LBB77_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; SI-NEXT: v_lshr_b64 v[4:5], v[1:2], 16
@@ -9296,22 +8906,17 @@ define inreg <4 x i8> @bitcast_v2bf16_to_v4i8_scalar(<2 x bfloat> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v0, v4
; SI-NEXT: v_mov_b32_e32 v1, v5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB77_4:
-; SI-NEXT: ; implicit-def: $vgpr4
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: s_branch .LBB77_2
;
; VI-LABEL: bitcast_v2bf16_to_v4i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB77_3
+; VI-NEXT: s_cbranch_scc0 .LBB77_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s6, s16, 24
; VI-NEXT: s_lshr_b32 s8, s16, 16
; VI-NEXT: s_lshr_b32 s7, s16, 8
-; VI-NEXT: s_cbranch_execnz .LBB77_4
+; VI-NEXT: s_cbranch_execnz .LBB77_3
; VI-NEXT: .LBB77_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -9337,11 +8942,6 @@ define inreg <4 x i8> @bitcast_v2bf16_to_v4i8_scalar(<2 x bfloat> inreg %a, i32
; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v4
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB77_3:
-; VI-NEXT: ; implicit-def: $sgpr7
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB77_2
-; VI-NEXT: .LBB77_4:
; VI-NEXT: v_mov_b32_e32 v2, s8
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v3, s6
@@ -9352,12 +8952,12 @@ define inreg <4 x i8> @bitcast_v2bf16_to_v4i8_scalar(<2 x bfloat> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB77_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s6, s16, 24
; GFX9-NEXT: s_lshr_b32 s8, s16, 16
; GFX9-NEXT: s_lshr_b32 s7, s16, 8
-; GFX9-NEXT: s_cbranch_execnz .LBB77_4
+; GFX9-NEXT: s_cbranch_execnz .LBB77_3
; GFX9-NEXT: .LBB77_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s16, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -9384,11 +8984,6 @@ define inreg <4 x i8> @bitcast_v2bf16_to_v4i8_scalar(<2 x bfloat> inreg %a, i32
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB77_3:
-; GFX9-NEXT: ; implicit-def: $sgpr7
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB77_2
-; GFX9-NEXT: .LBB77_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v2, s8
; GFX9-NEXT: v_mov_b32_e32 v3, s6
@@ -9400,13 +8995,13 @@ define inreg <4 x i8> @bitcast_v2bf16_to_v4i8_scalar(<2 x bfloat> inreg %a, i32
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB77_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s2, s0, 24
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s3, s0, 8
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB77_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB77_3
; GFX11-TRUE16-NEXT: .LBB77_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s0, 16
; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xffff0000
@@ -9438,11 +9033,6 @@ define inreg <4 x i8> @bitcast_v2bf16_to_v4i8_scalar(<2 x bfloat> inreg %a, i32
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB77_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr3
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr2
-; GFX11-TRUE16-NEXT: s_branch .LBB77_2
-; GFX11-TRUE16-NEXT: .LBB77_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s2
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v1, s3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -9452,13 +9042,13 @@ define inreg <4 x i8> @bitcast_v2bf16_to_v4i8_scalar(<2 x bfloat> inreg %a, i32
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB77_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s2, s0, 24
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s3, s0, 8
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB77_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB77_3
; GFX11-FAKE16-NEXT: .LBB77_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
@@ -9490,11 +9080,6 @@ define inreg <4 x i8> @bitcast_v2bf16_to_v4i8_scalar(<2 x bfloat> inreg %a, i32
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB77_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr3
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr2
-; GFX11-FAKE16-NEXT: s_branch .LBB77_2
-; GFX11-FAKE16-NEXT: .LBB77_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s2
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v1, s3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
@@ -9752,7 +9337,7 @@ define inreg <2 x bfloat> @bitcast_v4i8_to_v2bf16_scalar(<4 x i8> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB79_4
+; SI-NEXT: s_cbranch_scc0 .LBB79_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s4, s4, 16
@@ -9782,16 +9367,12 @@ define inreg <2 x bfloat> @bitcast_v4i8_to_v2bf16_scalar(<4 x i8> inreg %a, i32
; SI-NEXT: v_mul_f32_e64 v0, 1.0, s6
; SI-NEXT: v_lshr_b64 v[0:1], v[0:1], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB79_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB79_2
;
; VI-LABEL: bitcast_v4i8_to_v2bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB79_4
+; VI-NEXT: s_cbranch_scc0 .LBB79_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v0, s17
; VI-NEXT: v_mov_b32_e32 v1, 0xc0c0004
@@ -9815,15 +9396,12 @@ define inreg <2 x bfloat> @bitcast_v4i8_to_v2bf16_scalar(<4 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x3000000, v0
; VI-NEXT: .LBB79_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB79_4:
-; VI-NEXT: ; implicit-def: $vgpr0
-; VI-NEXT: s_branch .LBB79_2
;
; GFX9-LABEL: bitcast_v4i8_to_v2bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB79_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB79_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v0, s17
; GFX9-NEXT: v_mov_b32_e32 v1, 0xc0c0004
@@ -9847,16 +9425,13 @@ define inreg <2 x bfloat> @bitcast_v4i8_to_v2bf16_scalar(<4 x i8> inreg %a, i32
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB79_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB79_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0
-; GFX9-NEXT: s_branch .LBB79_2
;
; GFX11-LABEL: bitcast_v4i8_to_v2bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB79_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB79_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -9883,9 +9458,6 @@ define inreg <2 x bfloat> @bitcast_v4i8_to_v2bf16_scalar(<4 x i8> inreg %a, i32
; GFX11-NEXT: v_or_b32_e32 v0, v1, v0
; GFX11-NEXT: .LBB79_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB79_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0
-; GFX11-NEXT: s_branch .LBB79_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10074,7 +9646,7 @@ define inreg <4 x i8> @bitcast_v1i32_to_v4i8_scalar(<1 x i32> inreg %a, i32 inre
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB81_4
+; SI-NEXT: s_cbranch_scc0 .LBB81_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s6, s16, 24
; SI-NEXT: s_lshr_b32 s7, s16, 16
@@ -10091,17 +9663,12 @@ define inreg <4 x i8> @bitcast_v1i32_to_v4i8_scalar(<1 x i32> inreg %a, i32 inre
; SI-NEXT: v_mov_b32_e32 v2, s7
; SI-NEXT: v_mov_b32_e32 v3, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB81_4:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB81_2
;
; VI-LABEL: bitcast_v1i32_to_v4i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB81_4
+; VI-NEXT: s_cbranch_scc0 .LBB81_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s6, s16, 24
; VI-NEXT: s_lshr_b32 s7, s16, 16
@@ -10118,17 +9685,12 @@ define inreg <4 x i8> @bitcast_v1i32_to_v4i8_scalar(<1 x i32> inreg %a, i32 inre
; VI-NEXT: v_mov_b32_e32 v2, s7
; VI-NEXT: v_mov_b32_e32 v3, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB81_4:
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr7
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB81_2
;
; GFX9-LABEL: bitcast_v1i32_to_v4i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB81_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB81_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s6, s16, 24
; GFX9-NEXT: s_lshr_b32 s7, s16, 16
@@ -10145,18 +9707,13 @@ define inreg <4 x i8> @bitcast_v1i32_to_v4i8_scalar(<1 x i32> inreg %a, i32 inre
; GFX9-NEXT: v_mov_b32_e32 v2, s7
; GFX9-NEXT: v_mov_b32_e32 v3, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB81_4:
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr7
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB81_2
;
; GFX11-LABEL: bitcast_v1i32_to_v4i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB81_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB81_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s1, s0, 24
; GFX11-NEXT: s_lshr_b32 s2, s0, 16
@@ -10174,11 +9731,6 @@ define inreg <4 x i8> @bitcast_v1i32_to_v4i8_scalar(<1 x i32> inreg %a, i32 inre
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s3
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB81_4:
-; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: ; implicit-def: $sgpr2
-; GFX11-NEXT: ; implicit-def: $sgpr1
-; GFX11-NEXT: s_branch .LBB81_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10436,7 +9988,7 @@ define inreg <1 x i32> @bitcast_v4i8_to_v1i32_scalar(<4 x i8> inreg %a, i32 inre
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB83_4
+; SI-NEXT: s_cbranch_scc0 .LBB83_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -10465,15 +10017,12 @@ define inreg <1 x i32> @bitcast_v4i8_to_v1i32_scalar(<4 x i8> inreg %a, i32 inre
; SI-NEXT: .LBB83_3: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB83_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB83_2
;
; VI-LABEL: bitcast_v4i8_to_v1i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB83_4
+; VI-NEXT: s_cbranch_scc0 .LBB83_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v0, s17
; VI-NEXT: v_mov_b32_e32 v1, 0xc0c0004
@@ -10497,15 +10046,12 @@ define inreg <1 x i32> @bitcast_v4i8_to_v1i32_scalar(<4 x i8> inreg %a, i32 inre
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x3000000, v0
; VI-NEXT: .LBB83_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB83_4:
-; VI-NEXT: ; implicit-def: $vgpr0
-; VI-NEXT: s_branch .LBB83_2
;
; GFX9-LABEL: bitcast_v4i8_to_v1i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB83_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB83_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v0, s17
; GFX9-NEXT: v_mov_b32_e32 v1, 0xc0c0004
@@ -10529,16 +10075,13 @@ define inreg <1 x i32> @bitcast_v4i8_to_v1i32_scalar(<4 x i8> inreg %a, i32 inre
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB83_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB83_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0
-; GFX9-NEXT: s_branch .LBB83_2
;
; GFX11-LABEL: bitcast_v4i8_to_v1i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB83_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB83_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -10565,9 +10108,6 @@ define inreg <1 x i32> @bitcast_v4i8_to_v1i32_scalar(<4 x i8> inreg %a, i32 inre
; GFX11-NEXT: v_or_b32_e32 v0, v1, v0
; GFX11-NEXT: .LBB83_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB83_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0
-; GFX11-NEXT: s_branch .LBB83_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.352bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.352bit.ll
index 70ed2ca42b706..d18235d157ea8 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.352bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.352bit.ll
@@ -125,7 +125,7 @@ define inreg <11 x float> @bitcast_v11i32_to_v11f32_scalar(<11 x i32> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s27, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -153,14 +153,12 @@ define inreg <11 x float> @bitcast_v11i32_to_v11f32_scalar(<11 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v11i32_to_v11f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -188,14 +186,12 @@ define inreg <11 x float> @bitcast_v11i32_to_v11f32_scalar(<11 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v11i32_to_v11f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -223,15 +219,13 @@ define inreg <11 x float> @bitcast_v11i32_to_v11f32_scalar(<11 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v11i32_to_v11f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -256,8 +250,6 @@ define inreg <11 x float> @bitcast_v11i32_to_v11f32_scalar(<11 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_mov_b32_e32 v10, s22
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -388,9 +380,9 @@ define inreg <11 x i32> @bitcast_v11f32_to_v11i32_scalar(<11 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s27, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v10, s26, 1.0
; SI-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -405,8 +397,6 @@ define inreg <11 x i32> @bitcast_v11f32_to_v11i32_scalar(<11 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -424,9 +414,9 @@ define inreg <11 x i32> @bitcast_v11f32_to_v11i32_scalar(<11 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v10, s26, 1.0
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -441,8 +431,6 @@ define inreg <11 x i32> @bitcast_v11f32_to_v11i32_scalar(<11 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -460,9 +448,9 @@ define inreg <11 x i32> @bitcast_v11f32_to_v11i32_scalar(<11 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v10, s26, 1.0
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -477,8 +465,6 @@ define inreg <11 x i32> @bitcast_v11f32_to_v11i32_scalar(<11 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -501,10 +487,10 @@ define inreg <11 x i32> @bitcast_v11f32_to_v11i32_scalar(<11 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
@@ -519,8 +505,6 @@ define inreg <11 x i32> @bitcast_v11f32_to_v11i32_scalar(<11 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -733,7 +717,7 @@ define inreg <22 x i16> @bitcast_v11i32_to_v22i16_scalar(<11 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s27, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s27, s25, 16
; SI-NEXT: s_lshr_b32 s40, s23, 16
@@ -816,25 +800,12 @@ define inreg <22 x i16> @bitcast_v11i32_to_v22i16_scalar(<11 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v9, s14
; SI-NEXT: v_mov_b32_e32 v10, s8
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v11i32_to_v22i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -862,14 +833,12 @@ define inreg <22 x i16> @bitcast_v11i32_to_v22i16_scalar(<11 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v11i32_to_v22i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -897,15 +866,13 @@ define inreg <22 x i16> @bitcast_v11i32_to_v22i16_scalar(<11 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v11i32_to_v22i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -930,8 +897,6 @@ define inreg <22 x i16> @bitcast_v11i32_to_v22i16_scalar(<11 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_mov_b32_e32 v10, s22
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1221,7 +1186,7 @@ define inreg <11 x i32> @bitcast_v22i16_to_v11i32_scalar(<22 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s56, s17, 16
; SI-NEXT: s_lshr_b32 s57, s16, 16
; SI-NEXT: s_cmp_lg_u32 s27, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s57, 16
@@ -1326,15 +1291,12 @@ define inreg <11 x i32> @bitcast_v22i16_to_v11i32_scalar(<22 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v9, s13
; SI-NEXT: v_mov_b32_e32 v10, s14
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v22i16_to_v11i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -1406,16 +1368,14 @@ define inreg <11 x i32> @bitcast_v22i16_to_v11i32_scalar(<22 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v22i16_to_v11i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_4
+; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v10, s26, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
@@ -1430,8 +1390,6 @@ define inreg <11 x i32> @bitcast_v22i16_to_v11i32_scalar(<22 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB7_3:
-; GFX9-NEXT: s_branch .LBB7_2
-; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1454,10 +1412,10 @@ define inreg <11 x i32> @bitcast_v22i16_to_v11i32_scalar(<22 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
; GFX11-NEXT: .LBB7_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v10, s22, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
@@ -1472,8 +1430,6 @@ define inreg <11 x i32> @bitcast_v22i16_to_v11i32_scalar(<22 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: s_branch .LBB7_2
-; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -1686,7 +1642,7 @@ define inreg <22 x half> @bitcast_v11i32_to_v22f16_scalar(<11 x i32> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s27, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s27, s25, 16
; SI-NEXT: s_lshr_b32 s40, s23, 16
@@ -1769,25 +1725,12 @@ define inreg <22 x half> @bitcast_v11i32_to_v22f16_scalar(<11 x i32> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v9, s14
; SI-NEXT: v_mov_b32_e32 v10, s8
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v11i32_to_v22f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -1815,14 +1758,12 @@ define inreg <22 x half> @bitcast_v11i32_to_v22f16_scalar(<11 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v11i32_to_v22f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -1850,15 +1791,13 @@ define inreg <22 x half> @bitcast_v11i32_to_v22f16_scalar(<11 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v11i32_to_v22f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -1883,8 +1822,6 @@ define inreg <22 x half> @bitcast_v11i32_to_v22f16_scalar(<11 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_mov_b32_e32 v10, s22
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2218,7 +2155,7 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s56, s17, 16
; SI-NEXT: s_lshr_b32 s57, s16, 16
; SI-NEXT: s_cmp_lg_u32 s27, 0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s57, 16
@@ -2253,7 +2190,7 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; SI-NEXT: s_and_b32 s14, s26, 0xffff
; SI-NEXT: s_lshl_b32 s27, s15, 16
; SI-NEXT: s_or_b32 s14, s14, s27
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s57
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -2345,9 +2282,6 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v10, v12, v10
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -2365,9 +2299,9 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s26, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -2427,8 +2361,6 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v0, v11
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2446,9 +2378,9 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v10, s26, v0 op_sel_hi:[1,0]
@@ -2464,8 +2396,6 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2488,10 +2418,10 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v10, 0x200, s22 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
@@ -2506,8 +2436,6 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -2714,7 +2642,7 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s27, 0
-; SI-NEXT: s_cbranch_scc0 .LBB13_3
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s27, s17, 16
; SI-NEXT: s_lshr_b32 s43, s25, 16
@@ -2727,7 +2655,7 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB13_4
+; SI-NEXT: s_cbranch_execnz .LBB13_3
; SI-NEXT: .LBB13_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
; SI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -2751,21 +2679,8 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v1
-; SI-NEXT: s_branch .LBB13_5
+; SI-NEXT: s_branch .LBB13_4
; SI-NEXT: .LBB13_3:
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB13_2
-; SI-NEXT: .LBB13_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -2788,7 +2703,7 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v15, s10
; SI-NEXT: v_mov_b32_e32 v12, s6
; SI-NEXT: v_mov_b32_e32 v11, s4
-; SI-NEXT: .LBB13_5: ; %end
+; SI-NEXT: .LBB13_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v17
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v0, v14
@@ -2828,9 +2743,9 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_3
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB13_4
+; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v10, s26, 1.0
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -2845,8 +2760,6 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB13_3:
-; VI-NEXT: s_branch .LBB13_2
-; VI-NEXT: .LBB13_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2869,9 +2782,9 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB13_4
+; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v10, s26, 1.0
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -2886,8 +2799,6 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB13_3:
-; GFX9-NEXT: s_branch .LBB13_2
-; GFX9-NEXT: .LBB13_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2915,10 +2826,10 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
; GFX11-NEXT: .LBB13_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
@@ -2933,8 +2844,6 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB13_3:
-; GFX11-NEXT: s_branch .LBB13_2
-; GFX11-NEXT: .LBB13_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -3233,7 +3142,7 @@ define inreg <11 x float> @bitcast_v22i16_to_v11f32_scalar(<22 x i16> inreg %a,
; SI-NEXT: s_lshr_b32 s56, s17, 16
; SI-NEXT: s_lshr_b32 s57, s16, 16
; SI-NEXT: s_cmp_lg_u32 s27, 0
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s57, 16
@@ -3338,15 +3247,12 @@ define inreg <11 x float> @bitcast_v22i16_to_v11f32_scalar(<22 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s13
; SI-NEXT: v_mov_b32_e32 v10, s14
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v22i16_to_v11f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB15_3
; VI-NEXT: .LBB15_2: ; %cmp.true
@@ -3418,16 +3324,14 @@ define inreg <11 x float> @bitcast_v22i16_to_v11f32_scalar(<22 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v22i16_to_v11f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v10, s26, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
@@ -3442,8 +3346,6 @@ define inreg <11 x float> @bitcast_v22i16_to_v11f32_scalar(<22 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3466,10 +3368,10 @@ define inreg <11 x float> @bitcast_v22i16_to_v11f32_scalar(<22 x i16> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v10, s22, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
@@ -3484,8 +3386,6 @@ define inreg <11 x float> @bitcast_v22i16_to_v11f32_scalar(<22 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -3692,7 +3592,7 @@ define inreg <22 x half> @bitcast_v11f32_to_v22f16_scalar(<11 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s27, 0
-; SI-NEXT: s_cbranch_scc0 .LBB17_3
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s27, s17, 16
; SI-NEXT: s_lshr_b32 s43, s25, 16
@@ -3705,7 +3605,7 @@ define inreg <22 x half> @bitcast_v11f32_to_v22f16_scalar(<11 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB17_4
+; SI-NEXT: s_cbranch_execnz .LBB17_3
; SI-NEXT: .LBB17_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
; SI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -3729,21 +3629,8 @@ define inreg <22 x half> @bitcast_v11f32_to_v22f16_scalar(<11 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v1
-; SI-NEXT: s_branch .LBB17_5
+; SI-NEXT: s_branch .LBB17_4
; SI-NEXT: .LBB17_3:
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB17_2
-; SI-NEXT: .LBB17_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -3766,7 +3653,7 @@ define inreg <22 x half> @bitcast_v11f32_to_v22f16_scalar(<11 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v15, s10
; SI-NEXT: v_mov_b32_e32 v12, s6
; SI-NEXT: v_mov_b32_e32 v11, s4
-; SI-NEXT: .LBB17_5: ; %end
+; SI-NEXT: .LBB17_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v17
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v0, v14
@@ -3806,9 +3693,9 @@ define inreg <22 x half> @bitcast_v11f32_to_v22f16_scalar(<11 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_3
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_4
+; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v10, s26, 1.0
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -3823,8 +3710,6 @@ define inreg <22 x half> @bitcast_v11f32_to_v22f16_scalar(<11 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB17_3:
-; VI-NEXT: s_branch .LBB17_2
-; VI-NEXT: .LBB17_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -3847,9 +3732,9 @@ define inreg <22 x half> @bitcast_v11f32_to_v22f16_scalar(<11 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_4
+; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v10, s26, 1.0
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -3864,8 +3749,6 @@ define inreg <22 x half> @bitcast_v11f32_to_v22f16_scalar(<11 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB17_3:
-; GFX9-NEXT: s_branch .LBB17_2
-; GFX9-NEXT: .LBB17_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3893,10 +3776,10 @@ define inreg <22 x half> @bitcast_v11f32_to_v22f16_scalar(<11 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
; GFX11-NEXT: .LBB17_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
@@ -3911,8 +3794,6 @@ define inreg <22 x half> @bitcast_v11f32_to_v22f16_scalar(<11 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB17_3:
-; GFX11-NEXT: s_branch .LBB17_2
-; GFX11-NEXT: .LBB17_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -4255,7 +4136,7 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; SI-NEXT: s_lshr_b32 s56, s17, 16
; SI-NEXT: s_lshr_b32 s57, s16, 16
; SI-NEXT: s_cmp_lg_u32 s27, 0
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s57, 16
@@ -4290,7 +4171,7 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; SI-NEXT: s_and_b32 s14, s26, 0xffff
; SI-NEXT: s_lshl_b32 s27, s15, 16
; SI-NEXT: s_or_b32 s14, s14, s27
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s57
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -4382,9 +4263,6 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v10, v12, v10
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -4402,9 +4280,9 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
+; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s26, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -4464,8 +4342,6 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; VI-NEXT: v_or_b32_e32 v0, v0, v11
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
-; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -4483,9 +4359,9 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v10, s26, v0 op_sel_hi:[1,0]
@@ -4501,8 +4377,6 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -4525,10 +4399,10 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v10, 0x200, s22 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
@@ -4543,8 +4417,6 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -4890,7 +4762,7 @@ define inreg <22 x half> @bitcast_v22i16_to_v22f16_scalar(<22 x i16> inreg %a, i
; SI-NEXT: s_lshr_b32 s57, s17, 16
; SI-NEXT: s_lshr_b32 s62, s16, 16
; SI-NEXT: s_cmp_lg_u32 s27, 0
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s5, s17, 0xffff
; SI-NEXT: s_lshl_b32 s7, s57, 16
@@ -5049,25 +4921,12 @@ define inreg <22 x half> @bitcast_v22i16_to_v22f16_scalar(<22 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v9, s5
; SI-NEXT: v_mov_b32_e32 v10, s14
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr27
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_v22i16_to_v22f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
@@ -5139,16 +4998,14 @@ define inreg <22 x half> @bitcast_v22i16_to_v22f16_scalar(<22 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v22i16_to_v22f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
+; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v10, s26, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
@@ -5163,8 +5020,6 @@ define inreg <22 x half> @bitcast_v22i16_to_v22f16_scalar(<22 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
-; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -5192,10 +5047,10 @@ define inreg <22 x half> @bitcast_v22i16_to_v22f16_scalar(<22 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
; GFX11-NEXT: .LBB21_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v10, s22, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
@@ -5210,8 +5065,6 @@ define inreg <22 x half> @bitcast_v22i16_to_v22f16_scalar(<22 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
-; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -5518,9 +5371,9 @@ define inreg <22 x i16> @bitcast_v22f16_to_v22i16_scalar(<22 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s6, s17, 16
; SI-NEXT: s_lshr_b32 s28, s16, 16
; SI-NEXT: s_cmp_lg_u32 s27, 0
-; SI-NEXT: s_cbranch_scc0 .LBB23_3
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_4
+; SI-NEXT: s_cbranch_execnz .LBB23_3
; SI-NEXT: .LBB23_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
; SI-NEXT: v_cvt_f32_f16_e32 v3, s14
@@ -5615,10 +5468,8 @@ define inreg <22 x i16> @bitcast_v22f16_to_v22i16_scalar(<22 x half> inreg %a, i
; SI-NEXT: v_lshr_b64 v[14:15], v[4:5], 16
; SI-NEXT: v_lshr_b64 v[12:13], v[6:7], 16
; SI-NEXT: v_lshr_b64 v[10:11], v[8:9], 16
-; SI-NEXT: s_branch .LBB23_5
+; SI-NEXT: s_branch .LBB23_4
; SI-NEXT: .LBB23_3:
-; SI-NEXT: s_branch .LBB23_2
-; SI-NEXT: .LBB23_4:
; SI-NEXT: v_mov_b32_e32 v20, s12
; SI-NEXT: v_mov_b32_e32 v24, s9
; SI-NEXT: v_mov_b32_e32 v21, s10
@@ -5641,7 +5492,7 @@ define inreg <22 x i16> @bitcast_v22f16_to_v22i16_scalar(<22 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v14, s14
; SI-NEXT: v_mov_b32_e32 v12, s13
; SI-NEXT: v_mov_b32_e32 v10, s11
-; SI-NEXT: .LBB23_5: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v18
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v30
; SI-NEXT: v_or_b32_e32 v0, v2, v0
@@ -5681,9 +5532,9 @@ define inreg <22 x i16> @bitcast_v22f16_to_v22i16_scalar(<22 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
+; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s5, s25, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -5743,8 +5594,6 @@ define inreg <22 x i16> @bitcast_v22f16_to_v22i16_scalar(<22 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v11, v12
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
-; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -5767,9 +5616,9 @@ define inreg <22 x i16> @bitcast_v22f16_to_v22i16_scalar(<22 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
+; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v10, s26, v0 op_sel_hi:[1,0]
@@ -5785,8 +5634,6 @@ define inreg <22 x i16> @bitcast_v22f16_to_v22i16_scalar(<22 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
-; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -5814,10 +5661,10 @@ define inreg <22 x i16> @bitcast_v22f16_to_v22i16_scalar(<22 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
; GFX11-NEXT: .LBB23_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v10, 0x200, s22 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
@@ -5832,8 +5679,6 @@ define inreg <22 x i16> @bitcast_v22f16_to_v22i16_scalar(<22 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
-; GFX11-NEXT: .LBB23_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.384bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.384bit.ll
index 60c5431f7e4c6..3d22be09254e7 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.384bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.384bit.ll
@@ -129,7 +129,7 @@ define inreg <12 x float> @bitcast_v12i32_to_v12f32_scalar(<12 x i32> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -159,14 +159,12 @@ define inreg <12 x float> @bitcast_v12i32_to_v12f32_scalar(<12 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v12i32_to_v12f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -196,14 +194,12 @@ define inreg <12 x float> @bitcast_v12i32_to_v12f32_scalar(<12 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v12i32_to_v12f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -233,15 +229,13 @@ define inreg <12 x float> @bitcast_v12i32_to_v12f32_scalar(<12 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v12i32_to_v12f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -267,8 +261,6 @@ define inreg <12 x float> @bitcast_v12i32_to_v12f32_scalar(<12 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -402,9 +394,9 @@ define inreg <12 x i32> @bitcast_v12f32_to_v12i32_scalar(<12 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v11, s27, 1.0
; SI-NEXT: v_add_f32_e64 v10, s26, 1.0
@@ -420,8 +412,6 @@ define inreg <12 x i32> @bitcast_v12f32_to_v12i32_scalar(<12 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -440,9 +430,9 @@ define inreg <12 x i32> @bitcast_v12f32_to_v12i32_scalar(<12 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v11, s27, 1.0
; VI-NEXT: v_add_f32_e64 v10, s26, 1.0
@@ -458,8 +448,6 @@ define inreg <12 x i32> @bitcast_v12f32_to_v12i32_scalar(<12 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -478,9 +466,9 @@ define inreg <12 x i32> @bitcast_v12f32_to_v12i32_scalar(<12 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v11, s27, 1.0
; GFX9-NEXT: v_add_f32_e64 v10, s26, 1.0
@@ -496,8 +484,6 @@ define inreg <12 x i32> @bitcast_v12f32_to_v12i32_scalar(<12 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -521,10 +507,10 @@ define inreg <12 x i32> @bitcast_v12f32_to_v12i32_scalar(<12 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v11, s23, 1.0
; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
@@ -540,8 +526,6 @@ define inreg <12 x i32> @bitcast_v12f32_to_v12i32_scalar(<12 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -689,7 +673,7 @@ define inreg <6 x double> @bitcast_v12i32_to_v6f64_scalar(<12 x i32> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB5_3
; SI-NEXT: .LBB5_2: ; %cmp.true
@@ -719,14 +703,12 @@ define inreg <6 x double> @bitcast_v12i32_to_v6f64_scalar(<12 x i32> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v12i32_to_v6f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -756,14 +738,12 @@ define inreg <6 x double> @bitcast_v12i32_to_v6f64_scalar(<12 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v12i32_to_v6f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -793,15 +773,13 @@ define inreg <6 x double> @bitcast_v12i32_to_v6f64_scalar(<12 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v12i32_to_v6f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -827,8 +805,6 @@ define inreg <6 x double> @bitcast_v12i32_to_v6f64_scalar(<12 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -945,9 +921,9 @@ define inreg <12 x i32> @bitcast_v6f64_to_v12i32_scalar(<6 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_3
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB7_4
+; SI-NEXT: s_cbranch_execnz .LBB7_3
; SI-NEXT: .LBB7_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; SI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -957,8 +933,6 @@ define inreg <12 x i32> @bitcast_v6f64_to_v12i32_scalar(<6 x double> inreg %a, i
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB7_3:
-; SI-NEXT: s_branch .LBB7_2
-; SI-NEXT: .LBB7_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -977,9 +951,9 @@ define inreg <12 x i32> @bitcast_v6f64_to_v12i32_scalar(<6 x double> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_3
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_4
+; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -989,8 +963,6 @@ define inreg <12 x i32> @bitcast_v6f64_to_v12i32_scalar(<6 x double> inreg %a, i
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB7_3:
-; VI-NEXT: s_branch .LBB7_2
-; VI-NEXT: .LBB7_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1009,9 +981,9 @@ define inreg <12 x i32> @bitcast_v6f64_to_v12i32_scalar(<6 x double> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_4
+; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -1021,8 +993,6 @@ define inreg <12 x i32> @bitcast_v6f64_to_v12i32_scalar(<6 x double> inreg %a, i
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB7_3:
-; GFX9-NEXT: s_branch .LBB7_2
-; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1046,10 +1016,10 @@ define inreg <12 x i32> @bitcast_v6f64_to_v12i32_scalar(<6 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
; GFX11-NEXT: .LBB7_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
@@ -1059,8 +1029,6 @@ define inreg <12 x i32> @bitcast_v6f64_to_v12i32_scalar(<6 x double> inreg %a, i
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: s_branch .LBB7_2
-; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -1208,7 +1176,7 @@ define inreg <6 x i64> @bitcast_v12i32_to_v6i64_scalar(<12 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB9_3
; SI-NEXT: .LBB9_2: ; %cmp.true
@@ -1238,14 +1206,12 @@ define inreg <6 x i64> @bitcast_v12i32_to_v6i64_scalar(<12 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v12i32_to_v6i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -1275,14 +1241,12 @@ define inreg <6 x i64> @bitcast_v12i32_to_v6i64_scalar(<12 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v12i32_to_v6i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -1312,15 +1276,13 @@ define inreg <6 x i64> @bitcast_v12i32_to_v6i64_scalar(<12 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v12i32_to_v6i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -1346,8 +1308,6 @@ define inreg <6 x i64> @bitcast_v12i32_to_v6i64_scalar(<12 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1491,7 +1451,7 @@ define inreg <12 x i32> @bitcast_v6i64_to_v12i32_scalar(<6 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB11_4
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
@@ -1521,14 +1481,12 @@ define inreg <12 x i32> @bitcast_v6i64_to_v12i32_scalar(<6 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_4:
-; SI-NEXT: s_branch .LBB11_2
;
; VI-LABEL: bitcast_v6i64_to_v12i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_4
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
@@ -1558,14 +1516,12 @@ define inreg <12 x i32> @bitcast_v6i64_to_v12i32_scalar(<6 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_4:
-; VI-NEXT: s_branch .LBB11_2
;
; GFX9-LABEL: bitcast_v6i64_to_v12i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
@@ -1595,15 +1551,13 @@ define inreg <12 x i32> @bitcast_v6i64_to_v12i32_scalar(<6 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_4:
-; GFX9-NEXT: s_branch .LBB11_2
;
; GFX11-LABEL: bitcast_v6i64_to_v12i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
@@ -1629,8 +1583,6 @@ define inreg <12 x i32> @bitcast_v6i64_to_v12i32_scalar(<6 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_4:
-; GFX11-NEXT: s_branch .LBB11_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1846,7 +1798,7 @@ define inreg <24 x i16> @bitcast_v12i32_to_v24i16_scalar(<12 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB13_4
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s40, s27, 16
; SI-NEXT: s_lshr_b32 s41, s25, 16
@@ -1936,26 +1888,12 @@ define inreg <24 x i16> @bitcast_v12i32_to_v24i16_scalar(<12 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v10, s4
; SI-NEXT: v_mov_b32_e32 v11, s15
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB13_4:
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: s_branch .LBB13_2
;
; VI-LABEL: bitcast_v12i32_to_v24i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
@@ -1985,14 +1923,12 @@ define inreg <24 x i16> @bitcast_v12i32_to_v24i16_scalar(<12 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v12i32_to_v24i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
@@ -2022,15 +1958,13 @@ define inreg <24 x i16> @bitcast_v12i32_to_v24i16_scalar(<12 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v12i32_to_v24i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
@@ -2056,8 +1990,6 @@ define inreg <24 x i16> @bitcast_v12i32_to_v24i16_scalar(<12 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2364,7 +2296,7 @@ define inreg <12 x i32> @bitcast_v24i16_to_v12i32_scalar(<24 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s58, s17, 16
; SI-NEXT: s_lshr_b32 s59, s16, 16
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s59, 16
@@ -2478,15 +2410,12 @@ define inreg <12 x i32> @bitcast_v24i16_to_v12i32_scalar(<24 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v10, s14
; SI-NEXT: v_mov_b32_e32 v11, s15
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v24i16_to_v12i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB15_3
; VI-NEXT: .LBB15_2: ; %cmp.true
@@ -2564,16 +2493,14 @@ define inreg <12 x i32> @bitcast_v24i16_to_v12i32_scalar(<24 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v24i16_to_v12i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v11, s27, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, s26, 3 op_sel_hi:[1,0]
@@ -2589,8 +2516,6 @@ define inreg <12 x i32> @bitcast_v24i16_to_v12i32_scalar(<24 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2614,10 +2539,10 @@ define inreg <12 x i32> @bitcast_v24i16_to_v12i32_scalar(<24 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v11, s23, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v10, s22, 3 op_sel_hi:[1,0]
@@ -2633,8 +2558,6 @@ define inreg <12 x i32> @bitcast_v24i16_to_v12i32_scalar(<24 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -2857,7 +2780,7 @@ define inreg <24 x half> @bitcast_v12i32_to_v24f16_scalar(<12 x i32> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s40, s27, 16
; SI-NEXT: s_lshr_b32 s41, s25, 16
@@ -2947,26 +2870,12 @@ define inreg <24 x half> @bitcast_v12i32_to_v24f16_scalar(<12 x i32> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v10, s4
; SI-NEXT: v_mov_b32_e32 v11, s15
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_v12i32_to_v24f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
@@ -2996,14 +2905,12 @@ define inreg <24 x half> @bitcast_v12i32_to_v24f16_scalar(<12 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v12i32_to_v24f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
@@ -3033,15 +2940,13 @@ define inreg <24 x half> @bitcast_v12i32_to_v24f16_scalar(<12 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v12i32_to_v24f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
@@ -3067,8 +2972,6 @@ define inreg <24 x half> @bitcast_v12i32_to_v24f16_scalar(<12 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3423,7 +3326,7 @@ define inreg <12 x i32> @bitcast_v24f16_to_v12i32_scalar(<24 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s58, s17, 16
; SI-NEXT: s_lshr_b32 s59, s16, 16
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s59, 16
@@ -3461,7 +3364,7 @@ define inreg <12 x i32> @bitcast_v24f16_to_v12i32_scalar(<24 x half> inreg %a, i
; SI-NEXT: s_and_b32 s15, s27, 0xffff
; SI-NEXT: s_lshl_b32 s28, s40, 16
; SI-NEXT: s_or_b32 s15, s15, s28
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s59
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -3561,9 +3464,6 @@ define inreg <12 x i32> @bitcast_v24f16_to_v12i32_scalar(<24 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v11, v13, v11
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -3582,9 +3482,9 @@ define inreg <12 x i32> @bitcast_v24f16_to_v12i32_scalar(<24 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
+; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s27, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -3649,8 +3549,6 @@ define inreg <12 x i32> @bitcast_v24f16_to_v12i32_scalar(<24 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v0, v12
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
-; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -3669,9 +3567,9 @@ define inreg <12 x i32> @bitcast_v24f16_to_v12i32_scalar(<24 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v11, s27, v0 op_sel_hi:[1,0]
@@ -3688,8 +3586,6 @@ define inreg <12 x i32> @bitcast_v24f16_to_v12i32_scalar(<24 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3713,10 +3609,10 @@ define inreg <12 x i32> @bitcast_v24f16_to_v12i32_scalar(<24 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v11, 0x200, s23 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v10, 0x200, s22 op_sel_hi:[0,1]
@@ -3732,8 +3628,6 @@ define inreg <12 x i32> @bitcast_v24f16_to_v12i32_scalar(<24 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -3874,9 +3768,9 @@ define inreg <6 x double> @bitcast_v12f32_to_v6f64_scalar(<12 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB21_3
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_4
+; SI-NEXT: s_cbranch_execnz .LBB21_3
; SI-NEXT: .LBB21_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v11, s27, 1.0
; SI-NEXT: v_add_f32_e64 v10, s26, 1.0
@@ -3892,8 +3786,6 @@ define inreg <6 x double> @bitcast_v12f32_to_v6f64_scalar(<12 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB21_3:
-; SI-NEXT: s_branch .LBB21_2
-; SI-NEXT: .LBB21_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -3916,9 +3808,9 @@ define inreg <6 x double> @bitcast_v12f32_to_v6f64_scalar(<12 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_3
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_4
+; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v11, s27, 1.0
; VI-NEXT: v_add_f32_e64 v10, s26, 1.0
@@ -3934,8 +3826,6 @@ define inreg <6 x double> @bitcast_v12f32_to_v6f64_scalar(<12 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB21_3:
-; VI-NEXT: s_branch .LBB21_2
-; VI-NEXT: .LBB21_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -3958,9 +3848,9 @@ define inreg <6 x double> @bitcast_v12f32_to_v6f64_scalar(<12 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
+; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v11, s27, 1.0
; GFX9-NEXT: v_add_f32_e64 v10, s26, 1.0
@@ -3976,8 +3866,6 @@ define inreg <6 x double> @bitcast_v12f32_to_v6f64_scalar(<12 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
-; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -4005,10 +3893,10 @@ define inreg <6 x double> @bitcast_v12f32_to_v6f64_scalar(<12 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
; GFX11-NEXT: .LBB21_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v11, s23, 1.0
; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
@@ -4024,8 +3912,6 @@ define inreg <6 x double> @bitcast_v12f32_to_v6f64_scalar(<12 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
-; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -4151,9 +4037,9 @@ define inreg <12 x float> @bitcast_v6f64_to_v12f32_scalar(<6 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB23_3
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_4
+; SI-NEXT: s_cbranch_execnz .LBB23_3
; SI-NEXT: .LBB23_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; SI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -4163,8 +4049,6 @@ define inreg <12 x float> @bitcast_v6f64_to_v12f32_scalar(<6 x double> inreg %a,
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB23_3:
-; SI-NEXT: s_branch .LBB23_2
-; SI-NEXT: .LBB23_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -4183,9 +4067,9 @@ define inreg <12 x float> @bitcast_v6f64_to_v12f32_scalar(<6 x double> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
+; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -4195,8 +4079,6 @@ define inreg <12 x float> @bitcast_v6f64_to_v12f32_scalar(<6 x double> inreg %a,
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
-; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -4215,9 +4097,9 @@ define inreg <12 x float> @bitcast_v6f64_to_v12f32_scalar(<6 x double> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
+; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -4227,8 +4109,6 @@ define inreg <12 x float> @bitcast_v6f64_to_v12f32_scalar(<6 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
-; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -4252,10 +4132,10 @@ define inreg <12 x float> @bitcast_v6f64_to_v12f32_scalar(<6 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
; GFX11-NEXT: .LBB23_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
@@ -4265,8 +4145,6 @@ define inreg <12 x float> @bitcast_v6f64_to_v12f32_scalar(<6 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
-; GFX11-NEXT: .LBB23_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -4407,9 +4285,9 @@ define inreg <6 x i64> @bitcast_v12f32_to_v6i64_scalar(<12 x float> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB25_3
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_4
+; SI-NEXT: s_cbranch_execnz .LBB25_3
; SI-NEXT: .LBB25_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v11, s27, 1.0
; SI-NEXT: v_add_f32_e64 v10, s26, 1.0
@@ -4425,8 +4303,6 @@ define inreg <6 x i64> @bitcast_v12f32_to_v6i64_scalar(<12 x float> inreg %a, i3
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB25_3:
-; SI-NEXT: s_branch .LBB25_2
-; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -4449,9 +4325,9 @@ define inreg <6 x i64> @bitcast_v12f32_to_v6i64_scalar(<12 x float> inreg %a, i3
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
+; VI-NEXT: s_cbranch_execnz .LBB25_3
; VI-NEXT: .LBB25_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v11, s27, 1.0
; VI-NEXT: v_add_f32_e64 v10, s26, 1.0
@@ -4467,8 +4343,6 @@ define inreg <6 x i64> @bitcast_v12f32_to_v6i64_scalar(<12 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
-; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -4491,9 +4365,9 @@ define inreg <6 x i64> @bitcast_v12f32_to_v6i64_scalar(<12 x float> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
+; GFX9-NEXT: s_cbranch_execnz .LBB25_3
; GFX9-NEXT: .LBB25_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v11, s27, 1.0
; GFX9-NEXT: v_add_f32_e64 v10, s26, 1.0
@@ -4509,8 +4383,6 @@ define inreg <6 x i64> @bitcast_v12f32_to_v6i64_scalar(<12 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
-; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -4538,10 +4410,10 @@ define inreg <6 x i64> @bitcast_v12f32_to_v6i64_scalar(<12 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_3
; GFX11-NEXT: .LBB25_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v11, s23, 1.0
; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
@@ -4557,8 +4429,6 @@ define inreg <6 x i64> @bitcast_v12f32_to_v6i64_scalar(<12 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
-; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -4711,7 +4581,7 @@ define inreg <12 x float> @bitcast_v6i64_to_v12f32_scalar(<6 x i64> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB27_4
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB27_3
; SI-NEXT: .LBB27_2: ; %cmp.true
@@ -4741,14 +4611,12 @@ define inreg <12 x float> @bitcast_v6i64_to_v12f32_scalar(<6 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_4:
-; SI-NEXT: s_branch .LBB27_2
;
; VI-LABEL: bitcast_v6i64_to_v12f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB27_3
; VI-NEXT: .LBB27_2: ; %cmp.true
@@ -4778,14 +4646,12 @@ define inreg <12 x float> @bitcast_v6i64_to_v12f32_scalar(<6 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v6i64_to_v12f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB27_3
; GFX9-NEXT: .LBB27_2: ; %cmp.true
@@ -4815,15 +4681,13 @@ define inreg <12 x float> @bitcast_v6i64_to_v12f32_scalar(<6 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_4:
-; GFX9-NEXT: s_branch .LBB27_2
;
; GFX11-LABEL: bitcast_v6i64_to_v12f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB27_3
@@ -4849,8 +4713,6 @@ define inreg <12 x float> @bitcast_v6i64_to_v12f32_scalar(<6 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB27_4:
-; GFX11-NEXT: s_branch .LBB27_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5059,7 +4921,7 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s45, s27, 16
; SI-NEXT: s_lshr_b32 s44, s25, 16
@@ -5073,7 +4935,7 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB29_4
+; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v11, s27, 1.0
; SI-NEXT: v_add_f32_e64 v10, s26, 1.0
@@ -5099,22 +4961,8 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v1
-; SI-NEXT: s_branch .LBB29_5
+; SI-NEXT: s_branch .LBB29_4
; SI-NEXT: .LBB29_3:
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: s_branch .LBB29_2
-; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -5139,7 +4987,7 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v14, s8
; SI-NEXT: v_mov_b32_e32 v13, s6
; SI-NEXT: v_mov_b32_e32 v12, s4
-; SI-NEXT: .LBB29_5: ; %end
+; SI-NEXT: .LBB29_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
@@ -5182,9 +5030,9 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
+; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v11, s27, 1.0
; VI-NEXT: v_add_f32_e64 v10, s26, 1.0
@@ -5200,8 +5048,6 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
-; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -5224,9 +5070,9 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
+; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v11, s27, 1.0
; GFX9-NEXT: v_add_f32_e64 v10, s26, 1.0
@@ -5242,8 +5088,6 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
-; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -5271,10 +5115,10 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-NEXT: .LBB29_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v11, s23, 1.0
; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
@@ -5290,8 +5134,6 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
-; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -5607,7 +5449,7 @@ define inreg <12 x float> @bitcast_v24i16_to_v12f32_scalar(<24 x i16> inreg %a,
; SI-NEXT: s_lshr_b32 s58, s17, 16
; SI-NEXT: s_lshr_b32 s59, s16, 16
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s59, 16
@@ -5721,15 +5563,12 @@ define inreg <12 x float> @bitcast_v24i16_to_v12f32_scalar(<24 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s14
; SI-NEXT: v_mov_b32_e32 v11, s15
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v24i16_to_v12f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB31_3
; VI-NEXT: .LBB31_2: ; %cmp.true
@@ -5807,16 +5646,14 @@ define inreg <12 x float> @bitcast_v24i16_to_v12f32_scalar(<24 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v24i16_to_v12f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
+; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v11, s27, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, s26, 3 op_sel_hi:[1,0]
@@ -5832,8 +5669,6 @@ define inreg <12 x float> @bitcast_v24i16_to_v12f32_scalar(<24 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
-; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -5857,10 +5692,10 @@ define inreg <12 x float> @bitcast_v24i16_to_v12f32_scalar(<24 x i16> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-NEXT: .LBB31_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v11, s23, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v10, s22, 3 op_sel_hi:[1,0]
@@ -5876,8 +5711,6 @@ define inreg <12 x float> @bitcast_v24i16_to_v12f32_scalar(<24 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
-; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -6093,7 +5926,7 @@ define inreg <24 x half> @bitcast_v12f32_to_v24f16_scalar(<12 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s45, s27, 16
; SI-NEXT: s_lshr_b32 s44, s25, 16
@@ -6107,7 +5940,7 @@ define inreg <24 x half> @bitcast_v12f32_to_v24f16_scalar(<12 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB33_4
+; SI-NEXT: s_cbranch_execnz .LBB33_3
; SI-NEXT: .LBB33_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v11, s27, 1.0
; SI-NEXT: v_add_f32_e64 v10, s26, 1.0
@@ -6133,22 +5966,8 @@ define inreg <24 x half> @bitcast_v12f32_to_v24f16_scalar(<12 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v1
-; SI-NEXT: s_branch .LBB33_5
+; SI-NEXT: s_branch .LBB33_4
; SI-NEXT: .LBB33_3:
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: s_branch .LBB33_2
-; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -6173,7 +5992,7 @@ define inreg <24 x half> @bitcast_v12f32_to_v24f16_scalar(<12 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v14, s8
; SI-NEXT: v_mov_b32_e32 v13, s6
; SI-NEXT: v_mov_b32_e32 v12, s4
-; SI-NEXT: .LBB33_5: ; %end
+; SI-NEXT: .LBB33_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
@@ -6216,9 +6035,9 @@ define inreg <24 x half> @bitcast_v12f32_to_v24f16_scalar(<12 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_4
+; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v11, s27, 1.0
; VI-NEXT: v_add_f32_e64 v10, s26, 1.0
@@ -6234,8 +6053,6 @@ define inreg <24 x half> @bitcast_v12f32_to_v24f16_scalar(<12 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB33_3:
-; VI-NEXT: s_branch .LBB33_2
-; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -6258,9 +6075,9 @@ define inreg <24 x half> @bitcast_v12f32_to_v24f16_scalar(<12 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
+; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v11, s27, 1.0
; GFX9-NEXT: v_add_f32_e64 v10, s26, 1.0
@@ -6276,8 +6093,6 @@ define inreg <24 x half> @bitcast_v12f32_to_v24f16_scalar(<12 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: s_branch .LBB33_2
-; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -6305,10 +6120,10 @@ define inreg <24 x half> @bitcast_v12f32_to_v24f16_scalar(<12 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v11, s23, 1.0
; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
@@ -6324,8 +6139,6 @@ define inreg <24 x half> @bitcast_v12f32_to_v24f16_scalar(<12 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
-; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -6689,7 +6502,7 @@ define inreg <12 x float> @bitcast_v24f16_to_v12f32_scalar(<24 x half> inreg %a,
; SI-NEXT: s_lshr_b32 s58, s17, 16
; SI-NEXT: s_lshr_b32 s59, s16, 16
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB35_3
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s59, 16
@@ -6727,7 +6540,7 @@ define inreg <12 x float> @bitcast_v24f16_to_v12f32_scalar(<24 x half> inreg %a,
; SI-NEXT: s_and_b32 s15, s27, 0xffff
; SI-NEXT: s_lshl_b32 s28, s40, 16
; SI-NEXT: s_or_b32 s15, s15, s28
-; SI-NEXT: s_cbranch_execnz .LBB35_4
+; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s59
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -6827,9 +6640,6 @@ define inreg <12 x float> @bitcast_v24f16_to_v12f32_scalar(<24 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v11, v13, v11
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB35_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
-; SI-NEXT: s_branch .LBB35_2
-; SI-NEXT: .LBB35_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -6848,9 +6658,9 @@ define inreg <12 x float> @bitcast_v24f16_to_v12f32_scalar(<24 x half> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_4
+; VI-NEXT: s_cbranch_execnz .LBB35_3
; VI-NEXT: .LBB35_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s27, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -6915,8 +6725,6 @@ define inreg <12 x float> @bitcast_v24f16_to_v12f32_scalar(<24 x half> inreg %a,
; VI-NEXT: v_or_b32_e32 v0, v0, v12
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB35_3:
-; VI-NEXT: s_branch .LBB35_2
-; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -6935,9 +6743,9 @@ define inreg <12 x float> @bitcast_v24f16_to_v12f32_scalar(<24 x half> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
+; GFX9-NEXT: s_cbranch_execnz .LBB35_3
; GFX9-NEXT: .LBB35_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v11, s27, v0 op_sel_hi:[1,0]
@@ -6954,8 +6762,6 @@ define inreg <12 x float> @bitcast_v24f16_to_v12f32_scalar(<24 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
-; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -6979,10 +6785,10 @@ define inreg <12 x float> @bitcast_v24f16_to_v12f32_scalar(<24 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-NEXT: .LBB35_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v11, 0x200, s23 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v10, 0x200, s22 op_sel_hi:[0,1]
@@ -6998,8 +6804,6 @@ define inreg <12 x float> @bitcast_v24f16_to_v12f32_scalar(<24 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
-; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -7123,9 +6927,9 @@ define inreg <6 x i64> @bitcast_v6f64_to_v6i64_scalar(<6 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB37_3
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB37_4
+; SI-NEXT: s_cbranch_execnz .LBB37_3
; SI-NEXT: .LBB37_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
@@ -7135,8 +6939,6 @@ define inreg <6 x i64> @bitcast_v6f64_to_v6i64_scalar(<6 x double> inreg %a, i32
; SI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB37_3:
-; SI-NEXT: s_branch .LBB37_2
-; SI-NEXT: .LBB37_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -7159,9 +6961,9 @@ define inreg <6 x i64> @bitcast_v6f64_to_v6i64_scalar(<6 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB37_3
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_4
+; VI-NEXT: s_cbranch_execnz .LBB37_3
; VI-NEXT: .LBB37_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
@@ -7171,8 +6973,6 @@ define inreg <6 x i64> @bitcast_v6f64_to_v6i64_scalar(<6 x double> inreg %a, i32
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB37_3:
-; VI-NEXT: s_branch .LBB37_2
-; VI-NEXT: .LBB37_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -7195,9 +6995,9 @@ define inreg <6 x i64> @bitcast_v6f64_to_v6i64_scalar(<6 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_4
+; GFX9-NEXT: s_cbranch_execnz .LBB37_3
; GFX9-NEXT: .LBB37_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
@@ -7207,8 +7007,6 @@ define inreg <6 x i64> @bitcast_v6f64_to_v6i64_scalar(<6 x double> inreg %a, i32
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB37_3:
-; GFX9-NEXT: s_branch .LBB37_2
-; GFX9-NEXT: .LBB37_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -7236,10 +7034,10 @@ define inreg <6 x i64> @bitcast_v6f64_to_v6i64_scalar(<6 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
; GFX11-NEXT: .LBB37_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
@@ -7249,8 +7047,6 @@ define inreg <6 x i64> @bitcast_v6f64_to_v6i64_scalar(<6 x double> inreg %a, i32
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: s_branch .LBB37_2
-; GFX11-NEXT: .LBB37_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -7403,7 +7199,7 @@ define inreg <6 x double> @bitcast_v6i64_to_v6f64_scalar(<6 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB39_4
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB39_3
; SI-NEXT: .LBB39_2: ; %cmp.true
@@ -7433,14 +7229,12 @@ define inreg <6 x double> @bitcast_v6i64_to_v6f64_scalar(<6 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_4:
-; SI-NEXT: s_branch .LBB39_2
;
; VI-LABEL: bitcast_v6i64_to_v6f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB39_3
; VI-NEXT: .LBB39_2: ; %cmp.true
@@ -7470,14 +7264,12 @@ define inreg <6 x double> @bitcast_v6i64_to_v6f64_scalar(<6 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v6i64_to_v6f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB39_3
; GFX9-NEXT: .LBB39_2: ; %cmp.true
@@ -7507,15 +7299,13 @@ define inreg <6 x double> @bitcast_v6i64_to_v6f64_scalar(<6 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB39_4:
-; GFX9-NEXT: s_branch .LBB39_2
;
; GFX11-LABEL: bitcast_v6i64_to_v6f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB39_3
@@ -7540,8 +7330,6 @@ define inreg <6 x double> @bitcast_v6i64_to_v6f64_scalar(<6 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB39_4:
-; GFX11-NEXT: s_branch .LBB39_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7733,7 +7521,7 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB41_3
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s45, s27, 16
; SI-NEXT: s_lshr_b32 s44, s25, 16
@@ -7747,7 +7535,7 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i
; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB41_4
+; SI-NEXT: s_cbranch_execnz .LBB41_3
; SI-NEXT: .LBB41_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; SI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -7767,22 +7555,8 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i
; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v1
-; SI-NEXT: s_branch .LBB41_5
+; SI-NEXT: s_branch .LBB41_4
; SI-NEXT: .LBB41_3:
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: s_branch .LBB41_2
-; SI-NEXT: .LBB41_4:
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v7, s23
@@ -7807,7 +7581,7 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v14, s8
; SI-NEXT: v_mov_b32_e32 v13, s6
; SI-NEXT: v_mov_b32_e32 v12, s4
-; SI-NEXT: .LBB41_5: ; %end
+; SI-NEXT: .LBB41_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
@@ -7850,9 +7624,9 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB41_3
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB41_4
+; VI-NEXT: s_cbranch_execnz .LBB41_3
; VI-NEXT: .LBB41_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -7862,8 +7636,6 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB41_3:
-; VI-NEXT: s_branch .LBB41_2
-; VI-NEXT: .LBB41_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -7886,9 +7658,9 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB41_4
+; GFX9-NEXT: s_cbranch_execnz .LBB41_3
; GFX9-NEXT: .LBB41_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -7898,8 +7670,6 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB41_3:
-; GFX9-NEXT: s_branch .LBB41_2
-; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -7927,10 +7697,10 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
; GFX11-NEXT: .LBB41_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
@@ -7940,8 +7710,6 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB41_3:
-; GFX11-NEXT: s_branch .LBB41_2
-; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -8269,7 +8037,7 @@ define inreg <6 x double> @bitcast_v24i16_to_v6f64_scalar(<24 x i16> inreg %a, i
; SI-NEXT: v_writelane_b32 v12, s50, 6
; SI-NEXT: s_cmp_lg_u32 s28, 0
; SI-NEXT: v_writelane_b32 v12, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB43_4
+; SI-NEXT: s_cbranch_scc0 .LBB43_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s56, 16
@@ -8395,15 +8163,12 @@ define inreg <6 x double> @bitcast_v24i16_to_v6f64_scalar(<24 x i16> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB43_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB43_2
;
; VI-LABEL: bitcast_v24i16_to_v6f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB43_4
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB43_3
; VI-NEXT: .LBB43_2: ; %cmp.true
@@ -8481,16 +8246,14 @@ define inreg <6 x double> @bitcast_v24i16_to_v6f64_scalar(<24 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_4:
-; VI-NEXT: s_branch .LBB43_2
;
; GFX9-LABEL: bitcast_v24i16_to_v6f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
+; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v11, s27, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, s26, 3 op_sel_hi:[1,0]
@@ -8506,8 +8269,6 @@ define inreg <6 x double> @bitcast_v24i16_to_v6f64_scalar(<24 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
-; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -8535,10 +8296,10 @@ define inreg <6 x double> @bitcast_v24i16_to_v6f64_scalar(<24 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-NEXT: .LBB43_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v11, s23, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v10, s22, 3 op_sel_hi:[1,0]
@@ -8554,8 +8315,6 @@ define inreg <6 x double> @bitcast_v24i16_to_v6f64_scalar(<24 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
-; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -8756,7 +8515,7 @@ define inreg <24 x half> @bitcast_v6f64_to_v24f16_scalar(<6 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB45_3
+; SI-NEXT: s_cbranch_scc0 .LBB45_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s45, s27, 16
; SI-NEXT: s_lshr_b32 s44, s25, 16
@@ -8770,7 +8529,7 @@ define inreg <24 x half> @bitcast_v6f64_to_v24f16_scalar(<6 x double> inreg %a,
; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB45_4
+; SI-NEXT: s_cbranch_execnz .LBB45_3
; SI-NEXT: .LBB45_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; SI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -8790,22 +8549,8 @@ define inreg <24 x half> @bitcast_v6f64_to_v24f16_scalar(<6 x double> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v1
-; SI-NEXT: s_branch .LBB45_5
+; SI-NEXT: s_branch .LBB45_4
; SI-NEXT: .LBB45_3:
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: s_branch .LBB45_2
-; SI-NEXT: .LBB45_4:
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v7, s23
@@ -8830,7 +8575,7 @@ define inreg <24 x half> @bitcast_v6f64_to_v24f16_scalar(<6 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v14, s8
; SI-NEXT: v_mov_b32_e32 v13, s6
; SI-NEXT: v_mov_b32_e32 v12, s4
-; SI-NEXT: .LBB45_5: ; %end
+; SI-NEXT: .LBB45_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
@@ -8873,9 +8618,9 @@ define inreg <24 x half> @bitcast_v6f64_to_v24f16_scalar(<6 x double> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB45_3
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB45_4
+; VI-NEXT: s_cbranch_execnz .LBB45_3
; VI-NEXT: .LBB45_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -8885,8 +8630,6 @@ define inreg <24 x half> @bitcast_v6f64_to_v24f16_scalar(<6 x double> inreg %a,
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB45_3:
-; VI-NEXT: s_branch .LBB45_2
-; VI-NEXT: .LBB45_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -8909,9 +8652,9 @@ define inreg <24 x half> @bitcast_v6f64_to_v24f16_scalar(<6 x double> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB45_4
+; GFX9-NEXT: s_cbranch_execnz .LBB45_3
; GFX9-NEXT: .LBB45_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -8921,8 +8664,6 @@ define inreg <24 x half> @bitcast_v6f64_to_v24f16_scalar(<6 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB45_3:
-; GFX9-NEXT: s_branch .LBB45_2
-; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -8950,10 +8691,10 @@ define inreg <24 x half> @bitcast_v6f64_to_v24f16_scalar(<6 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
; GFX11-NEXT: .LBB45_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
@@ -8963,8 +8704,6 @@ define inreg <24 x half> @bitcast_v6f64_to_v24f16_scalar(<6 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB45_3:
-; GFX11-NEXT: s_branch .LBB45_2
-; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -9340,7 +9079,7 @@ define inreg <6 x double> @bitcast_v24f16_to_v6f64_scalar(<24 x half> inreg %a,
; SI-NEXT: v_writelane_b32 v16, s50, 6
; SI-NEXT: s_cmp_lg_u32 s28, 0
; SI-NEXT: v_writelane_b32 v16, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB47_3
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s56, 16
@@ -9378,7 +9117,7 @@ define inreg <6 x double> @bitcast_v24f16_to_v6f64_scalar(<24 x half> inreg %a,
; SI-NEXT: s_and_b32 s4, s27, 0xffff
; SI-NEXT: s_lshl_b32 s5, s6, 16
; SI-NEXT: s_or_b32 s47, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB47_4
+; SI-NEXT: s_cbranch_execnz .LBB47_3
; SI-NEXT: .LBB47_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s56
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -9476,11 +9215,8 @@ define inreg <6 x double> @bitcast_v24f16_to_v6f64_scalar(<24 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v10, v11, v10
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v12
; SI-NEXT: v_or_b32_e32 v11, v13, v11
-; SI-NEXT: s_branch .LBB47_5
+; SI-NEXT: s_branch .LBB47_4
; SI-NEXT: .LBB47_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB47_2
-; SI-NEXT: .LBB47_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -9497,7 +9233,7 @@ define inreg <6 x double> @bitcast_v24f16_to_v6f64_scalar(<24 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v13, s49
; SI-NEXT: v_mov_b32_e32 v14, s50
; SI-NEXT: v_mov_b32_e32 v15, s51
-; SI-NEXT: .LBB47_5: ; %end
+; SI-NEXT: .LBB47_4: ; %end
; SI-NEXT: v_readlane_b32 s51, v16, 7
; SI-NEXT: v_readlane_b32 s50, v16, 6
; SI-NEXT: v_readlane_b32 s49, v16, 5
@@ -9516,9 +9252,9 @@ define inreg <6 x double> @bitcast_v24f16_to_v6f64_scalar(<24 x half> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
+; VI-NEXT: s_cbranch_execnz .LBB47_3
; VI-NEXT: .LBB47_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s27, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -9583,8 +9319,6 @@ define inreg <6 x double> @bitcast_v24f16_to_v6f64_scalar(<24 x half> inreg %a,
; VI-NEXT: v_or_b32_e32 v0, v0, v12
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
-; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -9607,9 +9341,9 @@ define inreg <6 x double> @bitcast_v24f16_to_v6f64_scalar(<24 x half> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
+; GFX9-NEXT: s_cbranch_execnz .LBB47_3
; GFX9-NEXT: .LBB47_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v11, s27, v0 op_sel_hi:[1,0]
@@ -9626,8 +9360,6 @@ define inreg <6 x double> @bitcast_v24f16_to_v6f64_scalar(<24 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
-; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -9655,10 +9387,10 @@ define inreg <6 x double> @bitcast_v24f16_to_v6f64_scalar(<24 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-NEXT: .LBB47_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v11, 0x200, s23 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v10, 0x200, s22 op_sel_hi:[0,1]
@@ -9674,8 +9406,6 @@ define inreg <6 x double> @bitcast_v24f16_to_v6f64_scalar(<24 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -9903,7 +9633,7 @@ define inreg <24 x i16> @bitcast_v6i64_to_v24i16_scalar(<6 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB49_4
+; SI-NEXT: s_cbranch_scc0 .LBB49_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s40, s27, 16
; SI-NEXT: s_lshr_b32 s41, s25, 16
@@ -9993,26 +9723,12 @@ define inreg <24 x i16> @bitcast_v6i64_to_v24i16_scalar(<6 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v10, s4
; SI-NEXT: v_mov_b32_e32 v11, s15
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB49_4:
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: s_branch .LBB49_2
;
; VI-LABEL: bitcast_v6i64_to_v24i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB49_4
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB49_3
; VI-NEXT: .LBB49_2: ; %cmp.true
@@ -10042,14 +9758,12 @@ define inreg <24 x i16> @bitcast_v6i64_to_v24i16_scalar(<6 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB49_4:
-; VI-NEXT: s_branch .LBB49_2
;
; GFX9-LABEL: bitcast_v6i64_to_v24i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB49_3
; GFX9-NEXT: .LBB49_2: ; %cmp.true
@@ -10079,15 +9793,13 @@ define inreg <24 x i16> @bitcast_v6i64_to_v24i16_scalar(<6 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB49_4:
-; GFX9-NEXT: s_branch .LBB49_2
;
; GFX11-LABEL: bitcast_v6i64_to_v24i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
@@ -10113,8 +9825,6 @@ define inreg <24 x i16> @bitcast_v6i64_to_v24i16_scalar(<6 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_4:
-; GFX11-NEXT: s_branch .LBB49_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10433,7 +10143,7 @@ define inreg <6 x i64> @bitcast_v24i16_to_v6i64_scalar(<24 x i16> inreg %a, i32
; SI-NEXT: v_writelane_b32 v12, s50, 6
; SI-NEXT: s_cmp_lg_u32 s28, 0
; SI-NEXT: v_writelane_b32 v12, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB51_4
+; SI-NEXT: s_cbranch_scc0 .LBB51_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s56, 16
@@ -10559,15 +10269,12 @@ define inreg <6 x i64> @bitcast_v24i16_to_v6i64_scalar(<24 x i16> inreg %a, i32
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB51_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB51_2
;
; VI-LABEL: bitcast_v24i16_to_v6i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB51_4
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB51_3
; VI-NEXT: .LBB51_2: ; %cmp.true
@@ -10645,16 +10352,14 @@ define inreg <6 x i64> @bitcast_v24i16_to_v6i64_scalar(<24 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_4:
-; VI-NEXT: s_branch .LBB51_2
;
; GFX9-LABEL: bitcast_v24i16_to_v6i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_4
+; GFX9-NEXT: s_cbranch_execnz .LBB51_3
; GFX9-NEXT: .LBB51_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v11, s27, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, s26, 3 op_sel_hi:[1,0]
@@ -10670,8 +10375,6 @@ define inreg <6 x i64> @bitcast_v24i16_to_v6i64_scalar(<24 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB51_3:
-; GFX9-NEXT: s_branch .LBB51_2
-; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -10699,10 +10402,10 @@ define inreg <6 x i64> @bitcast_v24i16_to_v6i64_scalar(<24 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-NEXT: .LBB51_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v11, s23, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v10, s22, 3 op_sel_hi:[1,0]
@@ -10718,8 +10421,6 @@ define inreg <6 x i64> @bitcast_v24i16_to_v6i64_scalar(<24 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
-; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -10947,7 +10648,7 @@ define inreg <24 x half> @bitcast_v6i64_to_v24f16_scalar(<6 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB53_4
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s40, s27, 16
; SI-NEXT: s_lshr_b32 s41, s25, 16
@@ -11037,26 +10738,12 @@ define inreg <24 x half> @bitcast_v6i64_to_v24f16_scalar(<6 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v10, s4
; SI-NEXT: v_mov_b32_e32 v11, s15
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB53_4:
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: s_branch .LBB53_2
;
; VI-LABEL: bitcast_v6i64_to_v24f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB53_4
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB53_3
; VI-NEXT: .LBB53_2: ; %cmp.true
@@ -11086,14 +10773,12 @@ define inreg <24 x half> @bitcast_v6i64_to_v24f16_scalar(<6 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_4:
-; VI-NEXT: s_branch .LBB53_2
;
; GFX9-LABEL: bitcast_v6i64_to_v24f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB53_3
; GFX9-NEXT: .LBB53_2: ; %cmp.true
@@ -11123,15 +10808,13 @@ define inreg <24 x half> @bitcast_v6i64_to_v24f16_scalar(<6 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB53_4:
-; GFX9-NEXT: s_branch .LBB53_2
;
; GFX11-LABEL: bitcast_v6i64_to_v24f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
@@ -11157,8 +10840,6 @@ define inreg <24 x half> @bitcast_v6i64_to_v24f16_scalar(<6 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11525,7 +11206,7 @@ define inreg <6 x i64> @bitcast_v24f16_to_v6i64_scalar(<24 x half> inreg %a, i32
; SI-NEXT: v_writelane_b32 v16, s50, 6
; SI-NEXT: s_cmp_lg_u32 s28, 0
; SI-NEXT: v_writelane_b32 v16, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s56, 16
@@ -11563,7 +11244,7 @@ define inreg <6 x i64> @bitcast_v24f16_to_v6i64_scalar(<24 x half> inreg %a, i32
; SI-NEXT: s_and_b32 s4, s27, 0xffff
; SI-NEXT: s_lshl_b32 s5, s6, 16
; SI-NEXT: s_or_b32 s47, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB55_4
+; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s56
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -11661,11 +11342,8 @@ define inreg <6 x i64> @bitcast_v24f16_to_v6i64_scalar(<24 x half> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v10, v11, v10
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v12
; SI-NEXT: v_or_b32_e32 v11, v13, v11
-; SI-NEXT: s_branch .LBB55_5
+; SI-NEXT: s_branch .LBB55_4
; SI-NEXT: .LBB55_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB55_2
-; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -11682,7 +11360,7 @@ define inreg <6 x i64> @bitcast_v24f16_to_v6i64_scalar(<24 x half> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v13, s49
; SI-NEXT: v_mov_b32_e32 v14, s50
; SI-NEXT: v_mov_b32_e32 v15, s51
-; SI-NEXT: .LBB55_5: ; %end
+; SI-NEXT: .LBB55_4: ; %end
; SI-NEXT: v_readlane_b32 s51, v16, 7
; SI-NEXT: v_readlane_b32 s50, v16, 6
; SI-NEXT: v_readlane_b32 s49, v16, 5
@@ -11701,9 +11379,9 @@ define inreg <6 x i64> @bitcast_v24f16_to_v6i64_scalar(<24 x half> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_4
+; VI-NEXT: s_cbranch_execnz .LBB55_3
; VI-NEXT: .LBB55_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s27, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -11768,8 +11446,6 @@ define inreg <6 x i64> @bitcast_v24f16_to_v6i64_scalar(<24 x half> inreg %a, i32
; VI-NEXT: v_or_b32_e32 v0, v0, v12
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB55_3:
-; VI-NEXT: s_branch .LBB55_2
-; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -11792,9 +11468,9 @@ define inreg <6 x i64> @bitcast_v24f16_to_v6i64_scalar(<24 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
+; GFX9-NEXT: s_cbranch_execnz .LBB55_3
; GFX9-NEXT: .LBB55_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v11, s27, v0 op_sel_hi:[1,0]
@@ -11811,8 +11487,6 @@ define inreg <6 x i64> @bitcast_v24f16_to_v6i64_scalar(<24 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
-; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -11840,10 +11514,10 @@ define inreg <6 x i64> @bitcast_v24f16_to_v6i64_scalar(<24 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-NEXT: .LBB55_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v11, 0x200, s23 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v10, 0x200, s22 op_sel_hi:[0,1]
@@ -11859,8 +11533,6 @@ define inreg <6 x i64> @bitcast_v24f16_to_v6i64_scalar(<24 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
-; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -12231,7 +11903,7 @@ define inreg <24 x half> @bitcast_v24i16_to_v24f16_scalar(<24 x i16> inreg %a, i
; SI-NEXT: s_lshr_b32 s60, s17, 16
; SI-NEXT: s_lshr_b32 s74, s16, 16
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB57_4
+; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s5, s17, 0xffff
; SI-NEXT: s_lshl_b32 s7, s60, 16
@@ -12405,26 +12077,12 @@ define inreg <24 x half> @bitcast_v24i16_to_v24f16_scalar(<24 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v10, s12
; SI-NEXT: v_mov_b32_e32 v11, s13
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB57_4:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: s_branch .LBB57_2
;
; VI-LABEL: bitcast_v24i16_to_v24f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB57_3
; VI-NEXT: .LBB57_2: ; %cmp.true
@@ -12502,16 +12160,14 @@ define inreg <24 x half> @bitcast_v24i16_to_v24f16_scalar(<24 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v24i16_to_v24f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
+; GFX9-NEXT: s_cbranch_execnz .LBB57_3
; GFX9-NEXT: .LBB57_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v11, s27, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, s26, 3 op_sel_hi:[1,0]
@@ -12527,8 +12183,6 @@ define inreg <24 x half> @bitcast_v24i16_to_v24f16_scalar(<24 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
-; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -12556,10 +12210,10 @@ define inreg <24 x half> @bitcast_v24i16_to_v24f16_scalar(<24 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-NEXT: .LBB57_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v11, s23, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v10, s22, 3 op_sel_hi:[1,0]
@@ -12575,8 +12229,6 @@ define inreg <24 x half> @bitcast_v24i16_to_v24f16_scalar(<24 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB57_3:
-; GFX11-NEXT: s_branch .LBB57_2
-; GFX11-NEXT: .LBB57_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -12902,9 +12554,9 @@ define inreg <24 x i16> @bitcast_v24f16_to_v24i16_scalar(<24 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s6, s17, 16
; SI-NEXT: s_lshr_b32 s40, s16, 16
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB59_3
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_4
+; SI-NEXT: s_cbranch_execnz .LBB59_3
; SI-NEXT: .LBB59_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v1, s29
; SI-NEXT: v_cvt_f32_f16_e32 v2, s15
@@ -13008,10 +12660,8 @@ define inreg <24 x i16> @bitcast_v24f16_to_v24i16_scalar(<24 x half> inreg %a, i
; SI-NEXT: v_lshr_b64 v[16:17], v[6:7], 16
; SI-NEXT: v_lshr_b64 v[14:15], v[8:9], 16
; SI-NEXT: v_lshr_b64 v[12:13], v[10:11], 16
-; SI-NEXT: s_branch .LBB59_5
+; SI-NEXT: s_branch .LBB59_4
; SI-NEXT: .LBB59_3:
-; SI-NEXT: s_branch .LBB59_2
-; SI-NEXT: .LBB59_4:
; SI-NEXT: v_mov_b32_e32 v24, s13
; SI-NEXT: v_mov_b32_e32 v28, s10
; SI-NEXT: v_mov_b32_e32 v25, s12
@@ -13036,7 +12686,7 @@ define inreg <24 x i16> @bitcast_v24f16_to_v24i16_scalar(<24 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v16, s14
; SI-NEXT: v_mov_b32_e32 v14, s9
; SI-NEXT: v_mov_b32_e32 v12, s8
-; SI-NEXT: .LBB59_5: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v22
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v34
; SI-NEXT: v_or_b32_e32 v0, v2, v0
@@ -13079,9 +12729,9 @@ define inreg <24 x i16> @bitcast_v24f16_to_v24i16_scalar(<24 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
+; VI-NEXT: s_cbranch_execnz .LBB59_3
; VI-NEXT: .LBB59_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s5, s26, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -13146,8 +12796,6 @@ define inreg <24 x i16> @bitcast_v24f16_to_v24i16_scalar(<24 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v12, v13
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
-; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -13170,9 +12818,9 @@ define inreg <24 x i16> @bitcast_v24f16_to_v24i16_scalar(<24 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
+; GFX9-NEXT: s_cbranch_execnz .LBB59_3
; GFX9-NEXT: .LBB59_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v11, s27, v0 op_sel_hi:[1,0]
@@ -13189,8 +12837,6 @@ define inreg <24 x i16> @bitcast_v24f16_to_v24i16_scalar(<24 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
-; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -13218,10 +12864,10 @@ define inreg <24 x i16> @bitcast_v24f16_to_v24i16_scalar(<24 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-NEXT: .LBB59_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v11, 0x200, s23 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v10, 0x200, s22 op_sel_hi:[0,1]
@@ -13237,8 +12883,6 @@ define inreg <24 x i16> @bitcast_v24f16_to_v24i16_scalar(<24 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
-; GFX11-NEXT: .LBB59_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.448bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.448bit.ll
index cd47411c7e51c..506794a563501 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.448bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.448bit.ll
@@ -138,7 +138,7 @@ define inreg <14 x float> @bitcast_v14i32_to_v14f32_scalar(<14 x i32> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -172,15 +172,13 @@ define inreg <14 x float> @bitcast_v14i32_to_v14f32_scalar(<14 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v14i32_to_v14f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -214,15 +212,13 @@ define inreg <14 x float> @bitcast_v14i32_to_v14f32_scalar(<14 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v14i32_to_v14f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -256,15 +252,13 @@ define inreg <14 x float> @bitcast_v14i32_to_v14f32_scalar(<14 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v14i32_to_v14f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -293,8 +287,6 @@ define inreg <14 x float> @bitcast_v14i32_to_v14f32_scalar(<14 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -437,9 +429,9 @@ define inreg <14 x i32> @bitcast_v14f32_to_v14i32_scalar(<14 x float> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v13, s29, 1.0
; SI-NEXT: v_add_f32_e64 v12, s28, 1.0
@@ -457,8 +449,6 @@ define inreg <14 x i32> @bitcast_v14f32_to_v14i32_scalar(<14 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -482,9 +472,9 @@ define inreg <14 x i32> @bitcast_v14f32_to_v14i32_scalar(<14 x float> inreg %a,
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v13, s29, 1.0
; VI-NEXT: v_add_f32_e64 v12, s28, 1.0
@@ -502,8 +492,6 @@ define inreg <14 x i32> @bitcast_v14f32_to_v14i32_scalar(<14 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -527,9 +515,9 @@ define inreg <14 x i32> @bitcast_v14f32_to_v14i32_scalar(<14 x float> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v13, s29, 1.0
; GFX9-NEXT: v_add_f32_e64 v12, s28, 1.0
@@ -547,8 +535,6 @@ define inreg <14 x i32> @bitcast_v14f32_to_v14i32_scalar(<14 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -576,10 +562,10 @@ define inreg <14 x i32> @bitcast_v14f32_to_v14i32_scalar(<14 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v13, s25, 1.0
; GFX11-NEXT: v_add_f32_e64 v12, s24, 1.0
@@ -597,8 +583,6 @@ define inreg <14 x i32> @bitcast_v14f32_to_v14i32_scalar(<14 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -757,7 +741,7 @@ define inreg <7 x i64> @bitcast_v14i32_to_v7i64_scalar(<14 x i32> inreg %a, i32
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB5_3
; SI-NEXT: .LBB5_2: ; %cmp.true
@@ -791,15 +775,13 @@ define inreg <7 x i64> @bitcast_v14i32_to_v7i64_scalar(<14 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v14i32_to_v7i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -833,15 +815,13 @@ define inreg <7 x i64> @bitcast_v14i32_to_v7i64_scalar(<14 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v14i32_to_v7i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -875,15 +855,13 @@ define inreg <7 x i64> @bitcast_v14i32_to_v7i64_scalar(<14 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v14i32_to_v7i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -912,8 +890,6 @@ define inreg <7 x i64> @bitcast_v14i32_to_v7i64_scalar(<14 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1067,7 +1043,7 @@ define inreg <14 x i32> @bitcast_v7i64_to_v14i32_scalar(<7 x i64> inreg %a, i32
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB7_3
; SI-NEXT: .LBB7_2: ; %cmp.true
@@ -1101,15 +1077,13 @@ define inreg <14 x i32> @bitcast_v7i64_to_v14i32_scalar(<7 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v7i64_to_v14i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -1143,15 +1117,13 @@ define inreg <14 x i32> @bitcast_v7i64_to_v14i32_scalar(<7 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v7i64_to_v14i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
@@ -1185,15 +1157,13 @@ define inreg <14 x i32> @bitcast_v7i64_to_v14i32_scalar(<7 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v7i64_to_v14i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
@@ -1222,8 +1192,6 @@ define inreg <14 x i32> @bitcast_v7i64_to_v14i32_scalar(<7 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1373,7 +1341,7 @@ define inreg <7 x double> @bitcast_v14i32_to_v7f64_scalar(<14 x i32> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB9_3
; SI-NEXT: .LBB9_2: ; %cmp.true
@@ -1407,15 +1375,13 @@ define inreg <7 x double> @bitcast_v14i32_to_v7f64_scalar(<14 x i32> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v14i32_to_v7f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -1449,15 +1415,13 @@ define inreg <7 x double> @bitcast_v14i32_to_v7f64_scalar(<14 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v14i32_to_v7f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -1491,15 +1455,13 @@ define inreg <7 x double> @bitcast_v14i32_to_v7f64_scalar(<14 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v14i32_to_v7f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -1528,8 +1490,6 @@ define inreg <7 x double> @bitcast_v14i32_to_v7f64_scalar(<14 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1651,9 +1611,9 @@ define inreg <14 x i32> @bitcast_v7f64_to_v14i32_scalar(<7 x double> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; SI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
@@ -1664,8 +1624,6 @@ define inreg <14 x i32> @bitcast_v7f64_to_v14i32_scalar(<7 x double> inreg %a, i
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -1689,9 +1647,9 @@ define inreg <14 x i32> @bitcast_v7f64_to_v14i32_scalar(<7 x double> inreg %a, i
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
@@ -1702,8 +1660,6 @@ define inreg <14 x i32> @bitcast_v7f64_to_v14i32_scalar(<7 x double> inreg %a, i
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1727,9 +1683,9 @@ define inreg <14 x i32> @bitcast_v7f64_to_v14i32_scalar(<7 x double> inreg %a, i
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
@@ -1740,8 +1696,6 @@ define inreg <14 x i32> @bitcast_v7f64_to_v14i32_scalar(<7 x double> inreg %a, i
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1769,10 +1723,10 @@ define inreg <14 x i32> @bitcast_v7f64_to_v14i32_scalar(<7 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
@@ -1783,8 +1737,6 @@ define inreg <14 x i32> @bitcast_v7f64_to_v14i32_scalar(<7 x double> inreg %a, i
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -2030,7 +1982,7 @@ define inreg <28 x i16> @bitcast_v14i32_to_v28i16_scalar(<14 x i32> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB13_4
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s44, s29, 16
; SI-NEXT: s_lshr_b32 s45, s27, 16
@@ -2134,29 +2086,13 @@ define inreg <28 x i16> @bitcast_v14i32_to_v28i16_scalar(<14 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v12, s4
; SI-NEXT: v_mov_b32_e32 v13, s17
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB13_4:
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: s_branch .LBB13_2
;
; VI-LABEL: bitcast_v14i32_to_v28i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
@@ -2190,15 +2126,13 @@ define inreg <28 x i16> @bitcast_v14i32_to_v28i16_scalar(<14 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v14i32_to_v28i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
@@ -2232,15 +2166,13 @@ define inreg <28 x i16> @bitcast_v14i32_to_v28i16_scalar(<14 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v14i32_to_v28i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
@@ -2269,8 +2201,6 @@ define inreg <28 x i16> @bitcast_v14i32_to_v28i16_scalar(<14 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2624,7 +2554,7 @@ define inreg <14 x i32> @bitcast_v28i16_to_v14i32_scalar(<28 x i16> inreg %a, i3
; SI-NEXT: v_writelane_b32 v14, s50, 6
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v14, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s59, 16
@@ -2768,16 +2698,13 @@ define inreg <14 x i32> @bitcast_v28i16_to_v14i32_scalar(<28 x i16> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v28i16_to_v14i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB15_3
; VI-NEXT: .LBB15_2: ; %cmp.true
@@ -2867,17 +2794,15 @@ define inreg <14 x i32> @bitcast_v28i16_to_v14i32_scalar(<28 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v28i16_to_v14i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v13, s29, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s28, 3 op_sel_hi:[1,0]
@@ -2895,8 +2820,6 @@ define inreg <14 x i32> @bitcast_v28i16_to_v14i32_scalar(<28 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2924,10 +2847,10 @@ define inreg <14 x i32> @bitcast_v28i16_to_v14i32_scalar(<28 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v12, s24, 3 op_sel_hi:[1,0]
@@ -2945,8 +2868,6 @@ define inreg <14 x i32> @bitcast_v28i16_to_v14i32_scalar(<28 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -3192,7 +3113,7 @@ define inreg <28 x half> @bitcast_v14i32_to_v28f16_scalar(<14 x i32> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s44, s29, 16
; SI-NEXT: s_lshr_b32 s45, s27, 16
@@ -3296,29 +3217,13 @@ define inreg <28 x half> @bitcast_v14i32_to_v28f16_scalar(<14 x i32> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v12, s4
; SI-NEXT: v_mov_b32_e32 v13, s17
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_v14i32_to_v28f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
@@ -3352,15 +3257,13 @@ define inreg <28 x half> @bitcast_v14i32_to_v28f16_scalar(<14 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v14i32_to_v28f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
@@ -3394,15 +3297,13 @@ define inreg <28 x half> @bitcast_v14i32_to_v28f16_scalar(<14 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v14i32_to_v28f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
@@ -3431,8 +3332,6 @@ define inreg <28 x half> @bitcast_v14i32_to_v28f16_scalar(<14 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3842,7 +3741,7 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; SI-NEXT: v_writelane_b32 v16, s50, 6
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v16, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s59, 16
@@ -3886,7 +3785,7 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; SI-NEXT: s_and_b32 s4, s29, 0xffff
; SI-NEXT: s_lshl_b32 s5, s6, 16
; SI-NEXT: s_or_b32 s49, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s59
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -4000,11 +3899,8 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v12, v13, v12
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v14
; SI-NEXT: v_or_b32_e32 v13, v15, v13
-; SI-NEXT: s_branch .LBB19_5
+; SI-NEXT: s_branch .LBB19_4
; SI-NEXT: .LBB19_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -4021,7 +3917,7 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v13, s49
; SI-NEXT: v_mov_b32_e32 v14, s50
; SI-NEXT: v_mov_b32_e32 v15, s51
-; SI-NEXT: .LBB19_5: ; %end
+; SI-NEXT: .LBB19_4: ; %end
; SI-NEXT: v_readlane_b32 s51, v16, 7
; SI-NEXT: v_readlane_b32 s50, v16, 6
; SI-NEXT: v_readlane_b32 s49, v16, 5
@@ -4041,9 +3937,9 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
+; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s29, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -4118,8 +4014,6 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v0, v14
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
-; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -4143,9 +4037,9 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v13, s29, v0 op_sel_hi:[1,0]
@@ -4164,8 +4058,6 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -4193,10 +4085,10 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v12, 0x200, s24 op_sel_hi:[0,1]
@@ -4214,8 +4106,6 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -4367,9 +4257,9 @@ define inreg <7 x i64> @bitcast_v14f32_to_v7i64_scalar(<14 x float> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB21_3
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_4
+; SI-NEXT: s_cbranch_execnz .LBB21_3
; SI-NEXT: .LBB21_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v13, s29, 1.0
; SI-NEXT: v_add_f32_e64 v12, s28, 1.0
@@ -4387,8 +4277,6 @@ define inreg <7 x i64> @bitcast_v14f32_to_v7i64_scalar(<14 x float> inreg %a, i3
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB21_3:
-; SI-NEXT: s_branch .LBB21_2
-; SI-NEXT: .LBB21_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -4412,9 +4300,9 @@ define inreg <7 x i64> @bitcast_v14f32_to_v7i64_scalar(<14 x float> inreg %a, i3
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_3
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_4
+; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v13, s29, 1.0
; VI-NEXT: v_add_f32_e64 v12, s28, 1.0
@@ -4432,8 +4320,6 @@ define inreg <7 x i64> @bitcast_v14f32_to_v7i64_scalar(<14 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB21_3:
-; VI-NEXT: s_branch .LBB21_2
-; VI-NEXT: .LBB21_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -4457,9 +4343,9 @@ define inreg <7 x i64> @bitcast_v14f32_to_v7i64_scalar(<14 x float> inreg %a, i3
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
+; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v13, s29, 1.0
; GFX9-NEXT: v_add_f32_e64 v12, s28, 1.0
@@ -4477,8 +4363,6 @@ define inreg <7 x i64> @bitcast_v14f32_to_v7i64_scalar(<14 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
-; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -4506,10 +4390,10 @@ define inreg <7 x i64> @bitcast_v14f32_to_v7i64_scalar(<14 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
; GFX11-NEXT: .LBB21_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v13, s25, 1.0
; GFX11-NEXT: v_add_f32_e64 v12, s24, 1.0
@@ -4527,8 +4411,6 @@ define inreg <7 x i64> @bitcast_v14f32_to_v7i64_scalar(<14 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
-; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -4691,7 +4573,7 @@ define inreg <14 x float> @bitcast_v7i64_to_v14f32_scalar(<7 x i64> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB23_3
; SI-NEXT: .LBB23_2: ; %cmp.true
@@ -4725,15 +4607,13 @@ define inreg <14 x float> @bitcast_v7i64_to_v14f32_scalar(<7 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v7i64_to_v14f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
@@ -4767,15 +4647,13 @@ define inreg <14 x float> @bitcast_v7i64_to_v14f32_scalar(<7 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v7i64_to_v14f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
@@ -4809,15 +4687,13 @@ define inreg <14 x float> @bitcast_v7i64_to_v14f32_scalar(<7 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v7i64_to_v14f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
@@ -4846,8 +4722,6 @@ define inreg <14 x float> @bitcast_v7i64_to_v14f32_scalar(<7 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: s_branch .LBB23_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4990,9 +4864,9 @@ define inreg <7 x double> @bitcast_v14f32_to_v7f64_scalar(<14 x float> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB25_3
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_4
+; SI-NEXT: s_cbranch_execnz .LBB25_3
; SI-NEXT: .LBB25_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v13, s29, 1.0
; SI-NEXT: v_add_f32_e64 v12, s28, 1.0
@@ -5010,8 +4884,6 @@ define inreg <7 x double> @bitcast_v14f32_to_v7f64_scalar(<14 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB25_3:
-; SI-NEXT: s_branch .LBB25_2
-; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -5035,9 +4907,9 @@ define inreg <7 x double> @bitcast_v14f32_to_v7f64_scalar(<14 x float> inreg %a,
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
+; VI-NEXT: s_cbranch_execnz .LBB25_3
; VI-NEXT: .LBB25_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v13, s29, 1.0
; VI-NEXT: v_add_f32_e64 v12, s28, 1.0
@@ -5055,8 +4927,6 @@ define inreg <7 x double> @bitcast_v14f32_to_v7f64_scalar(<14 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
-; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -5080,9 +4950,9 @@ define inreg <7 x double> @bitcast_v14f32_to_v7f64_scalar(<14 x float> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
+; GFX9-NEXT: s_cbranch_execnz .LBB25_3
; GFX9-NEXT: .LBB25_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v13, s29, 1.0
; GFX9-NEXT: v_add_f32_e64 v12, s28, 1.0
@@ -5100,8 +4970,6 @@ define inreg <7 x double> @bitcast_v14f32_to_v7f64_scalar(<14 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
-; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -5129,10 +4997,10 @@ define inreg <7 x double> @bitcast_v14f32_to_v7f64_scalar(<14 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_3
; GFX11-NEXT: .LBB25_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v13, s25, 1.0
; GFX11-NEXT: v_add_f32_e64 v12, s24, 1.0
@@ -5150,8 +5018,6 @@ define inreg <7 x double> @bitcast_v14f32_to_v7f64_scalar(<14 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
-; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -5282,9 +5148,9 @@ define inreg <14 x float> @bitcast_v7f64_to_v14f32_scalar(<7 x double> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB27_3
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_4
+; SI-NEXT: s_cbranch_execnz .LBB27_3
; SI-NEXT: .LBB27_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; SI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
@@ -5295,8 +5161,6 @@ define inreg <14 x float> @bitcast_v7f64_to_v14f32_scalar(<7 x double> inreg %a,
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB27_3:
-; SI-NEXT: s_branch .LBB27_2
-; SI-NEXT: .LBB27_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -5320,9 +5184,9 @@ define inreg <14 x float> @bitcast_v7f64_to_v14f32_scalar(<7 x double> inreg %a,
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB27_3
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_4
+; VI-NEXT: s_cbranch_execnz .LBB27_3
; VI-NEXT: .LBB27_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
@@ -5333,8 +5197,6 @@ define inreg <14 x float> @bitcast_v7f64_to_v14f32_scalar(<7 x double> inreg %a,
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB27_3:
-; VI-NEXT: s_branch .LBB27_2
-; VI-NEXT: .LBB27_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -5358,9 +5220,9 @@ define inreg <14 x float> @bitcast_v7f64_to_v14f32_scalar(<7 x double> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_4
+; GFX9-NEXT: s_cbranch_execnz .LBB27_3
; GFX9-NEXT: .LBB27_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
@@ -5371,8 +5233,6 @@ define inreg <14 x float> @bitcast_v7f64_to_v14f32_scalar(<7 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB27_3:
-; GFX9-NEXT: s_branch .LBB27_2
-; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -5400,10 +5260,10 @@ define inreg <14 x float> @bitcast_v7f64_to_v14f32_scalar(<7 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_3
; GFX11-NEXT: .LBB27_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
@@ -5414,8 +5274,6 @@ define inreg <14 x float> @bitcast_v7f64_to_v14f32_scalar(<7 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
-; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -5654,7 +5512,7 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s58, s29, 16
; SI-NEXT: s_lshr_b32 s57, s27, 16
@@ -5670,7 +5528,7 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB29_4
+; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v13, s29, 1.0
; SI-NEXT: v_add_f32_e64 v12, s28, 1.0
@@ -5700,24 +5558,8 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; SI-NEXT: s_branch .LBB29_5
+; SI-NEXT: s_branch .LBB29_4
; SI-NEXT: .LBB29_3:
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: s_branch .LBB29_2
-; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -5746,7 +5588,7 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v18, s12
; SI-NEXT: v_mov_b32_e32 v19, s14
; SI-NEXT: v_mov_b32_e32 v20, s40
-; SI-NEXT: .LBB29_5: ; %end
+; SI-NEXT: .LBB29_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
@@ -5796,9 +5638,9 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a,
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
+; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v13, s29, 1.0
; VI-NEXT: v_add_f32_e64 v12, s28, 1.0
@@ -5816,8 +5658,6 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
-; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -5841,9 +5681,9 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
+; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v13, s29, 1.0
; GFX9-NEXT: v_add_f32_e64 v12, s28, 1.0
@@ -5861,8 +5701,6 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
-; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -5890,10 +5728,10 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-NEXT: .LBB29_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v13, s25, 1.0
; GFX11-NEXT: v_add_f32_e64 v12, s24, 1.0
@@ -5911,8 +5749,6 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
-; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -6275,7 +6111,7 @@ define inreg <14 x float> @bitcast_v28i16_to_v14f32_scalar(<28 x i16> inreg %a,
; SI-NEXT: v_writelane_b32 v14, s50, 6
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v14, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s59, 16
@@ -6419,16 +6255,13 @@ define inreg <14 x float> @bitcast_v28i16_to_v14f32_scalar(<28 x i16> inreg %a,
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v28i16_to_v14f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB31_3
; VI-NEXT: .LBB31_2: ; %cmp.true
@@ -6518,17 +6351,15 @@ define inreg <14 x float> @bitcast_v28i16_to_v14f32_scalar(<28 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v28i16_to_v14f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
+; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v13, s29, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s28, 3 op_sel_hi:[1,0]
@@ -6546,8 +6377,6 @@ define inreg <14 x float> @bitcast_v28i16_to_v14f32_scalar(<28 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
-; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -6575,10 +6404,10 @@ define inreg <14 x float> @bitcast_v28i16_to_v14f32_scalar(<28 x i16> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-NEXT: .LBB31_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v12, s24, 3 op_sel_hi:[1,0]
@@ -6596,8 +6425,6 @@ define inreg <14 x float> @bitcast_v28i16_to_v14f32_scalar(<28 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
-; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -6836,7 +6663,7 @@ define inreg <28 x half> @bitcast_v14f32_to_v28f16_scalar(<14 x float> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s58, s29, 16
; SI-NEXT: s_lshr_b32 s57, s27, 16
@@ -6852,7 +6679,7 @@ define inreg <28 x half> @bitcast_v14f32_to_v28f16_scalar(<14 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB33_4
+; SI-NEXT: s_cbranch_execnz .LBB33_3
; SI-NEXT: .LBB33_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v13, s29, 1.0
; SI-NEXT: v_add_f32_e64 v12, s28, 1.0
@@ -6882,24 +6709,8 @@ define inreg <28 x half> @bitcast_v14f32_to_v28f16_scalar(<14 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; SI-NEXT: s_branch .LBB33_5
+; SI-NEXT: s_branch .LBB33_4
; SI-NEXT: .LBB33_3:
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: s_branch .LBB33_2
-; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -6928,7 +6739,7 @@ define inreg <28 x half> @bitcast_v14f32_to_v28f16_scalar(<14 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v18, s12
; SI-NEXT: v_mov_b32_e32 v19, s14
; SI-NEXT: v_mov_b32_e32 v20, s40
-; SI-NEXT: .LBB33_5: ; %end
+; SI-NEXT: .LBB33_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
@@ -6978,9 +6789,9 @@ define inreg <28 x half> @bitcast_v14f32_to_v28f16_scalar(<14 x float> inreg %a,
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_4
+; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v13, s29, 1.0
; VI-NEXT: v_add_f32_e64 v12, s28, 1.0
@@ -6998,8 +6809,6 @@ define inreg <28 x half> @bitcast_v14f32_to_v28f16_scalar(<14 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB33_3:
-; VI-NEXT: s_branch .LBB33_2
-; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -7023,9 +6832,9 @@ define inreg <28 x half> @bitcast_v14f32_to_v28f16_scalar(<14 x float> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
+; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v13, s29, 1.0
; GFX9-NEXT: v_add_f32_e64 v12, s28, 1.0
@@ -7043,8 +6852,6 @@ define inreg <28 x half> @bitcast_v14f32_to_v28f16_scalar(<14 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: s_branch .LBB33_2
-; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -7072,10 +6879,10 @@ define inreg <28 x half> @bitcast_v14f32_to_v28f16_scalar(<14 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v13, s25, 1.0
; GFX11-NEXT: v_add_f32_e64 v12, s24, 1.0
@@ -7093,8 +6900,6 @@ define inreg <28 x half> @bitcast_v14f32_to_v28f16_scalar(<14 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
-; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -7513,7 +7318,7 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; SI-NEXT: v_writelane_b32 v16, s50, 6
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v16, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB35_3
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s59, 16
@@ -7557,7 +7362,7 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; SI-NEXT: s_and_b32 s4, s29, 0xffff
; SI-NEXT: s_lshl_b32 s5, s6, 16
; SI-NEXT: s_or_b32 s49, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB35_4
+; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s59
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -7671,11 +7476,8 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v12, v13, v12
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v14
; SI-NEXT: v_or_b32_e32 v13, v15, v13
-; SI-NEXT: s_branch .LBB35_5
+; SI-NEXT: s_branch .LBB35_4
; SI-NEXT: .LBB35_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB35_2
-; SI-NEXT: .LBB35_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -7692,7 +7494,7 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v13, s49
; SI-NEXT: v_mov_b32_e32 v14, s50
; SI-NEXT: v_mov_b32_e32 v15, s51
-; SI-NEXT: .LBB35_5: ; %end
+; SI-NEXT: .LBB35_4: ; %end
; SI-NEXT: v_readlane_b32 s51, v16, 7
; SI-NEXT: v_readlane_b32 s50, v16, 6
; SI-NEXT: v_readlane_b32 s49, v16, 5
@@ -7712,9 +7514,9 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_4
+; VI-NEXT: s_cbranch_execnz .LBB35_3
; VI-NEXT: .LBB35_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s29, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -7789,8 +7591,6 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; VI-NEXT: v_or_b32_e32 v0, v0, v14
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB35_3:
-; VI-NEXT: s_branch .LBB35_2
-; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -7814,9 +7614,9 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
+; GFX9-NEXT: s_cbranch_execnz .LBB35_3
; GFX9-NEXT: .LBB35_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v13, s29, v0 op_sel_hi:[1,0]
@@ -7835,8 +7635,6 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
-; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -7864,10 +7662,10 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-NEXT: .LBB35_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v12, 0x200, s24 op_sel_hi:[0,1]
@@ -7885,8 +7683,6 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
-; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -8049,7 +7845,7 @@ define inreg <7 x double> @bitcast_v7i64_to_v7f64_scalar(<7 x i64> inreg %a, i32
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB37_3
; SI-NEXT: .LBB37_2: ; %cmp.true
@@ -8083,15 +7879,13 @@ define inreg <7 x double> @bitcast_v7i64_to_v7f64_scalar(<7 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v7i64_to_v7f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB37_3
; VI-NEXT: .LBB37_2: ; %cmp.true
@@ -8125,15 +7919,13 @@ define inreg <7 x double> @bitcast_v7i64_to_v7f64_scalar(<7 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v7i64_to_v7f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB37_3
; GFX9-NEXT: .LBB37_2: ; %cmp.true
@@ -8167,15 +7959,13 @@ define inreg <7 x double> @bitcast_v7i64_to_v7f64_scalar(<7 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v7i64_to_v7f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
@@ -8203,8 +7993,6 @@ define inreg <7 x double> @bitcast_v7i64_to_v7f64_scalar(<7 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_4:
-; GFX11-NEXT: s_branch .LBB37_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8326,9 +8114,9 @@ define inreg <7 x i64> @bitcast_v7f64_to_v7i64_scalar(<7 x double> inreg %a, i32
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB39_3
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_4
+; SI-NEXT: s_cbranch_execnz .LBB39_3
; SI-NEXT: .LBB39_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
@@ -8339,8 +8127,6 @@ define inreg <7 x i64> @bitcast_v7f64_to_v7i64_scalar(<7 x double> inreg %a, i32
; SI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB39_3:
-; SI-NEXT: s_branch .LBB39_2
-; SI-NEXT: .LBB39_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -8364,9 +8150,9 @@ define inreg <7 x i64> @bitcast_v7f64_to_v7i64_scalar(<7 x double> inreg %a, i32
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB39_3
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_4
+; VI-NEXT: s_cbranch_execnz .LBB39_3
; VI-NEXT: .LBB39_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
@@ -8377,8 +8163,6 @@ define inreg <7 x i64> @bitcast_v7f64_to_v7i64_scalar(<7 x double> inreg %a, i32
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB39_3:
-; VI-NEXT: s_branch .LBB39_2
-; VI-NEXT: .LBB39_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -8402,9 +8186,9 @@ define inreg <7 x i64> @bitcast_v7f64_to_v7i64_scalar(<7 x double> inreg %a, i32
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
+; GFX9-NEXT: s_cbranch_execnz .LBB39_3
; GFX9-NEXT: .LBB39_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
@@ -8415,8 +8199,6 @@ define inreg <7 x i64> @bitcast_v7f64_to_v7i64_scalar(<7 x double> inreg %a, i32
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
-; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -8444,10 +8226,10 @@ define inreg <7 x i64> @bitcast_v7f64_to_v7i64_scalar(<7 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_3
; GFX11-NEXT: .LBB39_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
@@ -8458,8 +8240,6 @@ define inreg <7 x i64> @bitcast_v7f64_to_v7i64_scalar(<7 x double> inreg %a, i32
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
-; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -8709,7 +8489,7 @@ define inreg <28 x i16> @bitcast_v7i64_to_v28i16_scalar(<7 x i64> inreg %a, i32
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB41_4
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s44, s29, 16
; SI-NEXT: s_lshr_b32 s45, s27, 16
@@ -8813,29 +8593,13 @@ define inreg <28 x i16> @bitcast_v7i64_to_v28i16_scalar(<7 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v12, s4
; SI-NEXT: v_mov_b32_e32 v13, s17
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB41_4:
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: s_branch .LBB41_2
;
; VI-LABEL: bitcast_v7i64_to_v28i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB41_4
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB41_3
; VI-NEXT: .LBB41_2: ; %cmp.true
@@ -8869,15 +8633,13 @@ define inreg <28 x i16> @bitcast_v7i64_to_v28i16_scalar(<7 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_4:
-; VI-NEXT: s_branch .LBB41_2
;
; GFX9-LABEL: bitcast_v7i64_to_v28i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB41_3
; GFX9-NEXT: .LBB41_2: ; %cmp.true
@@ -8911,15 +8673,13 @@ define inreg <28 x i16> @bitcast_v7i64_to_v28i16_scalar(<7 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_4:
-; GFX9-NEXT: s_branch .LBB41_2
;
; GFX11-LABEL: bitcast_v7i64_to_v28i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
@@ -8948,8 +8708,6 @@ define inreg <28 x i16> @bitcast_v7i64_to_v28i16_scalar(<7 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_4:
-; GFX11-NEXT: s_branch .LBB41_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9303,7 +9061,7 @@ define inreg <7 x i64> @bitcast_v28i16_to_v7i64_scalar(<28 x i16> inreg %a, i32
; SI-NEXT: v_writelane_b32 v14, s50, 6
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v14, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB43_4
+; SI-NEXT: s_cbranch_scc0 .LBB43_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s59, 16
@@ -9447,16 +9205,13 @@ define inreg <7 x i64> @bitcast_v28i16_to_v7i64_scalar(<28 x i16> inreg %a, i32
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB43_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB43_2
;
; VI-LABEL: bitcast_v28i16_to_v7i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB43_4
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB43_3
; VI-NEXT: .LBB43_2: ; %cmp.true
@@ -9546,17 +9301,15 @@ define inreg <7 x i64> @bitcast_v28i16_to_v7i64_scalar(<28 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_4:
-; VI-NEXT: s_branch .LBB43_2
;
; GFX9-LABEL: bitcast_v28i16_to_v7i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
+; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v13, s29, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s28, 3 op_sel_hi:[1,0]
@@ -9574,8 +9327,6 @@ define inreg <7 x i64> @bitcast_v28i16_to_v7i64_scalar(<28 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
-; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -9603,10 +9354,10 @@ define inreg <7 x i64> @bitcast_v28i16_to_v7i64_scalar(<28 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-NEXT: .LBB43_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v12, s24, 3 op_sel_hi:[1,0]
@@ -9624,8 +9375,6 @@ define inreg <7 x i64> @bitcast_v28i16_to_v7i64_scalar(<28 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
-; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -9875,7 +9624,7 @@ define inreg <28 x half> @bitcast_v7i64_to_v28f16_scalar(<7 x i64> inreg %a, i32
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB45_4
+; SI-NEXT: s_cbranch_scc0 .LBB45_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s44, s29, 16
; SI-NEXT: s_lshr_b32 s45, s27, 16
@@ -9979,29 +9728,13 @@ define inreg <28 x half> @bitcast_v7i64_to_v28f16_scalar(<7 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v12, s4
; SI-NEXT: v_mov_b32_e32 v13, s17
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB45_4:
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: s_branch .LBB45_2
;
; VI-LABEL: bitcast_v7i64_to_v28f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB45_4
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB45_3
; VI-NEXT: .LBB45_2: ; %cmp.true
@@ -10035,15 +9768,13 @@ define inreg <28 x half> @bitcast_v7i64_to_v28f16_scalar(<7 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_4:
-; VI-NEXT: s_branch .LBB45_2
;
; GFX9-LABEL: bitcast_v7i64_to_v28f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB45_3
; GFX9-NEXT: .LBB45_2: ; %cmp.true
@@ -10077,15 +9808,13 @@ define inreg <28 x half> @bitcast_v7i64_to_v28f16_scalar(<7 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_4:
-; GFX9-NEXT: s_branch .LBB45_2
;
; GFX11-LABEL: bitcast_v7i64_to_v28f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
@@ -10114,8 +9843,6 @@ define inreg <28 x half> @bitcast_v7i64_to_v28f16_scalar(<7 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_4:
-; GFX11-NEXT: s_branch .LBB45_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10525,7 +10252,7 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; SI-NEXT: v_writelane_b32 v16, s50, 6
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v16, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB47_3
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s59, 16
@@ -10569,7 +10296,7 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; SI-NEXT: s_and_b32 s4, s29, 0xffff
; SI-NEXT: s_lshl_b32 s5, s6, 16
; SI-NEXT: s_or_b32 s49, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB47_4
+; SI-NEXT: s_cbranch_execnz .LBB47_3
; SI-NEXT: .LBB47_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s59
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -10683,11 +10410,8 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v12, v13, v12
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v14
; SI-NEXT: v_or_b32_e32 v13, v15, v13
-; SI-NEXT: s_branch .LBB47_5
+; SI-NEXT: s_branch .LBB47_4
; SI-NEXT: .LBB47_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB47_2
-; SI-NEXT: .LBB47_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -10704,7 +10428,7 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v13, s49
; SI-NEXT: v_mov_b32_e32 v14, s50
; SI-NEXT: v_mov_b32_e32 v15, s51
-; SI-NEXT: .LBB47_5: ; %end
+; SI-NEXT: .LBB47_4: ; %end
; SI-NEXT: v_readlane_b32 s51, v16, 7
; SI-NEXT: v_readlane_b32 s50, v16, 6
; SI-NEXT: v_readlane_b32 s49, v16, 5
@@ -10724,9 +10448,9 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
+; VI-NEXT: s_cbranch_execnz .LBB47_3
; VI-NEXT: .LBB47_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s29, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -10801,8 +10525,6 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; VI-NEXT: v_or_b32_e32 v0, v0, v14
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
-; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -10826,9 +10548,9 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
+; GFX9-NEXT: s_cbranch_execnz .LBB47_3
; GFX9-NEXT: .LBB47_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v13, s29, v0 op_sel_hi:[1,0]
@@ -10847,8 +10569,6 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
-; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -10876,10 +10596,10 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-NEXT: .LBB47_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v12, 0x200, s24 op_sel_hi:[0,1]
@@ -10897,8 +10617,6 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -11116,7 +10834,7 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB49_3
+; SI-NEXT: s_cbranch_scc0 .LBB49_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s58, s29, 16
; SI-NEXT: s_lshr_b32 s57, s27, 16
@@ -11132,7 +10850,7 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i
; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB49_4
+; SI-NEXT: s_cbranch_execnz .LBB49_3
; SI-NEXT: .LBB49_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; SI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
@@ -11155,24 +10873,8 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i
; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; SI-NEXT: s_branch .LBB49_5
+; SI-NEXT: s_branch .LBB49_4
; SI-NEXT: .LBB49_3:
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: s_branch .LBB49_2
-; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v9, s25
@@ -11201,7 +10903,7 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v16, s8
; SI-NEXT: v_mov_b32_e32 v15, s6
; SI-NEXT: v_mov_b32_e32 v14, s4
-; SI-NEXT: .LBB49_5: ; %end
+; SI-NEXT: .LBB49_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
@@ -11251,9 +10953,9 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB49_3
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB49_4
+; VI-NEXT: s_cbranch_execnz .LBB49_3
; VI-NEXT: .LBB49_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
@@ -11264,8 +10966,6 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB49_3:
-; VI-NEXT: s_branch .LBB49_2
-; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -11289,9 +10989,9 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
+; GFX9-NEXT: s_cbranch_execnz .LBB49_3
; GFX9-NEXT: .LBB49_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
@@ -11302,8 +11002,6 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: s_branch .LBB49_2
-; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -11331,10 +11029,10 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
@@ -11345,8 +11043,6 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: s_branch .LBB49_2
-; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -11709,7 +11405,7 @@ define inreg <7 x double> @bitcast_v28i16_to_v7f64_scalar(<28 x i16> inreg %a, i
; SI-NEXT: v_writelane_b32 v14, s50, 6
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v14, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB51_4
+; SI-NEXT: s_cbranch_scc0 .LBB51_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s59, 16
@@ -11853,16 +11549,13 @@ define inreg <7 x double> @bitcast_v28i16_to_v7f64_scalar(<28 x i16> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB51_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB51_2
;
; VI-LABEL: bitcast_v28i16_to_v7f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB51_4
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB51_3
; VI-NEXT: .LBB51_2: ; %cmp.true
@@ -11952,17 +11645,15 @@ define inreg <7 x double> @bitcast_v28i16_to_v7f64_scalar(<28 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_4:
-; VI-NEXT: s_branch .LBB51_2
;
; GFX9-LABEL: bitcast_v28i16_to_v7f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_4
+; GFX9-NEXT: s_cbranch_execnz .LBB51_3
; GFX9-NEXT: .LBB51_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v13, s29, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s28, 3 op_sel_hi:[1,0]
@@ -11980,8 +11671,6 @@ define inreg <7 x double> @bitcast_v28i16_to_v7f64_scalar(<28 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB51_3:
-; GFX9-NEXT: s_branch .LBB51_2
-; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -12009,10 +11698,10 @@ define inreg <7 x double> @bitcast_v28i16_to_v7f64_scalar(<28 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-NEXT: .LBB51_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v12, s24, 3 op_sel_hi:[1,0]
@@ -12030,8 +11719,6 @@ define inreg <7 x double> @bitcast_v28i16_to_v7f64_scalar(<28 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
-; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -12249,7 +11936,7 @@ define inreg <28 x half> @bitcast_v7f64_to_v28f16_scalar(<7 x double> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB53_3
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s58, s29, 16
; SI-NEXT: s_lshr_b32 s57, s27, 16
@@ -12265,7 +11952,7 @@ define inreg <28 x half> @bitcast_v7f64_to_v28f16_scalar(<7 x double> inreg %a,
; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB53_4
+; SI-NEXT: s_cbranch_execnz .LBB53_3
; SI-NEXT: .LBB53_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; SI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
@@ -12288,24 +11975,8 @@ define inreg <28 x half> @bitcast_v7f64_to_v28f16_scalar(<7 x double> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; SI-NEXT: s_branch .LBB53_5
+; SI-NEXT: s_branch .LBB53_4
; SI-NEXT: .LBB53_3:
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: s_branch .LBB53_2
-; SI-NEXT: .LBB53_4:
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v9, s25
@@ -12334,7 +12005,7 @@ define inreg <28 x half> @bitcast_v7f64_to_v28f16_scalar(<7 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v16, s8
; SI-NEXT: v_mov_b32_e32 v15, s6
; SI-NEXT: v_mov_b32_e32 v14, s4
-; SI-NEXT: .LBB53_5: ; %end
+; SI-NEXT: .LBB53_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
@@ -12384,9 +12055,9 @@ define inreg <28 x half> @bitcast_v7f64_to_v28f16_scalar(<7 x double> inreg %a,
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB53_3
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB53_4
+; VI-NEXT: s_cbranch_execnz .LBB53_3
; VI-NEXT: .LBB53_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
@@ -12397,8 +12068,6 @@ define inreg <28 x half> @bitcast_v7f64_to_v28f16_scalar(<7 x double> inreg %a,
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB53_3:
-; VI-NEXT: s_branch .LBB53_2
-; VI-NEXT: .LBB53_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -12422,9 +12091,9 @@ define inreg <28 x half> @bitcast_v7f64_to_v28f16_scalar(<7 x double> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB53_4
+; GFX9-NEXT: s_cbranch_execnz .LBB53_3
; GFX9-NEXT: .LBB53_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
@@ -12435,8 +12104,6 @@ define inreg <28 x half> @bitcast_v7f64_to_v28f16_scalar(<7 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB53_3:
-; GFX9-NEXT: s_branch .LBB53_2
-; GFX9-NEXT: .LBB53_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -12464,10 +12131,10 @@ define inreg <28 x half> @bitcast_v7f64_to_v28f16_scalar(<7 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-NEXT: .LBB53_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
@@ -12478,8 +12145,6 @@ define inreg <28 x half> @bitcast_v7f64_to_v28f16_scalar(<7 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB53_3:
-; GFX11-NEXT: s_branch .LBB53_2
-; GFX11-NEXT: .LBB53_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -12898,7 +12563,7 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; SI-NEXT: v_writelane_b32 v16, s50, 6
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v16, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s59, 16
@@ -12942,7 +12607,7 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; SI-NEXT: s_and_b32 s4, s29, 0xffff
; SI-NEXT: s_lshl_b32 s5, s6, 16
; SI-NEXT: s_or_b32 s49, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB55_4
+; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s59
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -13056,11 +12721,8 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v12, v13, v12
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v14
; SI-NEXT: v_or_b32_e32 v13, v15, v13
-; SI-NEXT: s_branch .LBB55_5
+; SI-NEXT: s_branch .LBB55_4
; SI-NEXT: .LBB55_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB55_2
-; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -13077,7 +12739,7 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v13, s49
; SI-NEXT: v_mov_b32_e32 v14, s50
; SI-NEXT: v_mov_b32_e32 v15, s51
-; SI-NEXT: .LBB55_5: ; %end
+; SI-NEXT: .LBB55_4: ; %end
; SI-NEXT: v_readlane_b32 s51, v16, 7
; SI-NEXT: v_readlane_b32 s50, v16, 6
; SI-NEXT: v_readlane_b32 s49, v16, 5
@@ -13097,9 +12759,9 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_4
+; VI-NEXT: s_cbranch_execnz .LBB55_3
; VI-NEXT: .LBB55_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s29, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -13174,8 +12836,6 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; VI-NEXT: v_or_b32_e32 v0, v0, v14
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB55_3:
-; VI-NEXT: s_branch .LBB55_2
-; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -13199,9 +12859,9 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
+; GFX9-NEXT: s_cbranch_execnz .LBB55_3
; GFX9-NEXT: .LBB55_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v13, s29, v0 op_sel_hi:[1,0]
@@ -13220,8 +12880,6 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
-; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -13249,10 +12907,10 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-NEXT: .LBB55_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v12, 0x200, s24 op_sel_hi:[0,1]
@@ -13270,8 +12928,6 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
-; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -13708,7 +13364,7 @@ define inreg <28 x half> @bitcast_v28i16_to_v28f16_scalar(<28 x i16> inreg %a, i
; SI-NEXT: s_lshr_b32 s89, s16, 16
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB57_4
+; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s5, s17, 0xffff
; SI-NEXT: s_lshl_b32 s7, s74, 16
@@ -13910,29 +13566,13 @@ define inreg <28 x half> @bitcast_v28i16_to_v28f16_scalar(<28 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v12, s14
; SI-NEXT: v_mov_b32_e32 v13, s15
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB57_4:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: s_branch .LBB57_2
;
; VI-LABEL: bitcast_v28i16_to_v28f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB57_3
; VI-NEXT: .LBB57_2: ; %cmp.true
@@ -14022,17 +13662,15 @@ define inreg <28 x half> @bitcast_v28i16_to_v28f16_scalar(<28 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v28i16_to_v28f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
+; GFX9-NEXT: s_cbranch_execnz .LBB57_3
; GFX9-NEXT: .LBB57_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v13, s29, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s28, 3 op_sel_hi:[1,0]
@@ -14050,8 +13688,6 @@ define inreg <28 x half> @bitcast_v28i16_to_v28f16_scalar(<28 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
-; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -14079,10 +13715,10 @@ define inreg <28 x half> @bitcast_v28i16_to_v28f16_scalar(<28 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-NEXT: .LBB57_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v12, s24, 3 op_sel_hi:[1,0]
@@ -14100,8 +13736,6 @@ define inreg <28 x half> @bitcast_v28i16_to_v28f16_scalar(<28 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB57_3:
-; GFX11-NEXT: s_branch .LBB57_2
-; GFX11-NEXT: .LBB57_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -14465,9 +14099,9 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s43, s16, 16
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB59_3
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_4
+; SI-NEXT: s_cbranch_execnz .LBB59_3
; SI-NEXT: .LBB59_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v1, s42
; SI-NEXT: v_cvt_f32_f16_e32 v2, s16
@@ -14588,10 +14222,8 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i
; SI-NEXT: v_lshr_b64 v[18:19], v[8:9], 16
; SI-NEXT: v_lshr_b64 v[16:17], v[10:11], 16
; SI-NEXT: v_lshr_b64 v[14:15], v[12:13], 16
-; SI-NEXT: s_branch .LBB59_5
+; SI-NEXT: s_branch .LBB59_4
; SI-NEXT: .LBB59_3:
-; SI-NEXT: s_branch .LBB59_2
-; SI-NEXT: .LBB59_4:
; SI-NEXT: v_mov_b32_e32 v28, s15
; SI-NEXT: v_mov_b32_e32 v33, s10
; SI-NEXT: v_mov_b32_e32 v29, s11
@@ -14620,7 +14252,7 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v18, s14
; SI-NEXT: v_mov_b32_e32 v16, s13
; SI-NEXT: v_mov_b32_e32 v14, s12
-; SI-NEXT: .LBB59_5: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v26
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v49
; SI-NEXT: v_or_b32_e32 v0, v2, v0
@@ -14670,9 +14302,9 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_readfirstlane_b32 s4, v0
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
+; VI-NEXT: s_cbranch_execnz .LBB59_3
; VI-NEXT: .LBB59_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s5, s28, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -14747,8 +14379,6 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v14, v15
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
-; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -14772,9 +14402,9 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
+; GFX9-NEXT: s_cbranch_execnz .LBB59_3
; GFX9-NEXT: .LBB59_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v13, s29, v0 op_sel_hi:[1,0]
@@ -14793,8 +14423,6 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
-; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -14822,10 +14450,10 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-NEXT: .LBB59_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v12, 0x200, s24 op_sel_hi:[0,1]
@@ -14843,8 +14471,6 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
-; GFX11-NEXT: .LBB59_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll
index 547985e7ef4e3..8915b5ab7fcf3 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll
@@ -253,7 +253,7 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; SI-NEXT: v_mul_f32_e64 v9, 1.0, s5
; SI-NEXT: v_mul_f32_e64 v8, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s6
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v9
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v8
@@ -278,19 +278,14 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v6
; SI-NEXT: v_or_b32_e32 v0, v0, v2
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: ; implicit-def: $vgpr6
-; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v3bf16_to_v3f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_3
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_4
+; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v1, 0x40c00000
@@ -323,8 +318,6 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB1_3:
-; VI-NEXT: s_branch .LBB1_2
-; VI-NEXT: .LBB1_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -333,9 +326,9 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_4
+; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
; GFX9-NEXT: s_lshl_b32 s4, s17, 16
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -371,8 +364,6 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; GFX9-NEXT: v_lshl_or_b32 v1, s4, 16, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB1_3:
-; GFX9-NEXT: s_branch .LBB1_2
-; GFX9-NEXT: .LBB1_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -382,10 +373,10 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB1_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB1_3
; GFX11-TRUE16-NEXT: .LBB1_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
@@ -425,8 +416,6 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v3.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB1_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB1_2
-; GFX11-TRUE16-NEXT: .LBB1_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -435,10 +424,10 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB1_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB1_3
; GFX11-FAKE16-NEXT: .LBB1_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s0, 16
; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
@@ -479,8 +468,6 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v2, 16, v0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB1_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB1_2
-; GFX11-FAKE16-NEXT: .LBB1_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -614,12 +601,12 @@ define inreg <3 x bfloat> @bitcast_v3f16_to_v3bf16_scalar(<3 x half> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s5, s16, 16
; SI-NEXT: s_lshl_b32 s8, s4, 16
; SI-NEXT: s_lshl_b32 s9, s17, 16
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s17
; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
@@ -633,17 +620,12 @@ define inreg <3 x bfloat> @bitcast_v3f16_to_v3bf16_scalar(<3 x half> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v2
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; SI-NEXT: s_branch .LBB3_5
+; SI-NEXT: s_branch .LBB3_4
; SI-NEXT: .LBB3_3:
-; SI-NEXT: ; implicit-def: $sgpr5
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v2, s9
; SI-NEXT: v_mov_b32_e32 v1, s8
; SI-NEXT: v_mov_b32_e32 v0, s5
-; SI-NEXT: .LBB3_5: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
@@ -656,9 +638,9 @@ define inreg <3 x bfloat> @bitcast_v3f16_to_v3bf16_scalar(<3 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -670,8 +652,6 @@ define inreg <3 x bfloat> @bitcast_v3f16_to_v3bf16_scalar(<3 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v1, 0x7e000000, v1
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -680,17 +660,15 @@ define inreg <3 x bfloat> @bitcast_v3f16_to_v3bf16_scalar(<3 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -700,17 +678,15 @@ define inreg <3 x bfloat> @bitcast_v3f16_to_v3bf16_scalar(<3 x half> inreg %a, i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -971,7 +947,7 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s6
; SI-NEXT: v_mul_f32_e64 v0, 1.0, s5
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s4
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v0
@@ -994,19 +970,14 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v3
; SI-NEXT: v_or_b32_e32 v0, v0, v2
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v3bf16_to_v3i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_3
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_4
+; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v1, 0x40c00000
@@ -1039,8 +1010,6 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB5_3:
-; VI-NEXT: s_branch .LBB5_2
-; VI-NEXT: .LBB5_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -1049,9 +1018,9 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_4
+; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
; GFX9-NEXT: s_lshl_b32 s4, s17, 16
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -1087,8 +1056,6 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; GFX9-NEXT: v_lshl_or_b32 v1, s4, 16, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB5_3:
-; GFX9-NEXT: s_branch .LBB5_2
-; GFX9-NEXT: .LBB5_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -1098,10 +1065,10 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB5_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB5_3
; GFX11-TRUE16-NEXT: .LBB5_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s0, 16
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
@@ -1137,8 +1104,6 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v4.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB5_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB5_2
-; GFX11-TRUE16-NEXT: .LBB5_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -1147,10 +1112,10 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB5_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB5_3
; GFX11-FAKE16-NEXT: .LBB5_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s0, 16
; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
@@ -1188,8 +1153,6 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v2, v0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB5_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB5_2
-; GFX11-FAKE16-NEXT: .LBB5_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -1311,7 +1274,7 @@ define inreg <3 x bfloat> @bitcast_v3i16_to_v3bf16_scalar(<3 x i16> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s5, s16, 16
; SI-NEXT: s_lshl_b32 s8, s4, 16
@@ -1335,17 +1298,12 @@ define inreg <3 x bfloat> @bitcast_v3i16_to_v3bf16_scalar(<3 x i16> inreg %a, i3
; SI-NEXT: v_lshr_b64 v[0:1], v[0:1], 16
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v2
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: ; implicit-def: $sgpr5
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v3i16_to_v3bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -1359,23 +1317,19 @@ define inreg <3 x bfloat> @bitcast_v3i16_to_v3bf16_scalar(<3 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v3i16_to_v3bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_4
+; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB7_3:
-; GFX9-NEXT: s_branch .LBB7_2
-; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -1385,17 +1339,15 @@ define inreg <3 x bfloat> @bitcast_v3i16_to_v3bf16_scalar(<3 x i16> inreg %a, i3
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
; GFX11-NEXT: .LBB7_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: s_branch .LBB7_2
-; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -1515,9 +1467,9 @@ define inreg <3 x i16> @bitcast_v3f16_to_v3i16_scalar(<3 x half> inreg %a, i32 i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_3
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB9_4
+; SI-NEXT: s_cbranch_execnz .LBB9_3
; SI-NEXT: .LBB9_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s4
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -1531,14 +1483,12 @@ define inreg <3 x i16> @bitcast_v3f16_to_v3i16_scalar(<3 x half> inreg %a, i32 i
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_lshr_b64 v[2:3], v[0:1], 16
; SI-NEXT: v_or_b32_e32 v0, v4, v0
-; SI-NEXT: s_branch .LBB9_5
+; SI-NEXT: s_branch .LBB9_4
; SI-NEXT: .LBB9_3:
-; SI-NEXT: s_branch .LBB9_2
-; SI-NEXT: .LBB9_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v2, s4
-; SI-NEXT: .LBB9_5: ; %end
+; SI-NEXT: .LBB9_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v0, v0, v2
@@ -1548,9 +1498,9 @@ define inreg <3 x i16> @bitcast_v3f16_to_v3i16_scalar(<3 x half> inreg %a, i32 i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_3
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_4
+; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -1562,8 +1512,6 @@ define inreg <3 x i16> @bitcast_v3f16_to_v3i16_scalar(<3 x half> inreg %a, i32 i
; VI-NEXT: v_or_b32_e32 v1, 0x7e000000, v1
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB9_3:
-; VI-NEXT: s_branch .LBB9_2
-; VI-NEXT: .LBB9_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -1572,17 +1520,15 @@ define inreg <3 x i16> @bitcast_v3f16_to_v3i16_scalar(<3 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_4
+; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB9_3:
-; GFX9-NEXT: s_branch .LBB9_2
-; GFX9-NEXT: .LBB9_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -1592,17 +1538,15 @@ define inreg <3 x i16> @bitcast_v3f16_to_v3i16_scalar(<3 x half> inreg %a, i32 i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
; GFX11-NEXT: .LBB9_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB9_3:
-; GFX11-NEXT: s_branch .LBB9_2
-; GFX11-NEXT: .LBB9_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -1728,7 +1672,7 @@ define inreg <3 x half> @bitcast_v3i16_to_v3f16_scalar(<3 x i16> inreg %a, i32 i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB11_4
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s8, s6, 16
@@ -1753,17 +1697,12 @@ define inreg <3 x half> @bitcast_v3i16_to_v3f16_scalar(<3 x i16> inreg %a, i32 i
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s7
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_4:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB11_2
;
; VI-LABEL: bitcast_v3i16_to_v3f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_4
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
@@ -1777,23 +1716,19 @@ define inreg <3 x half> @bitcast_v3i16_to_v3f16_scalar(<3 x i16> inreg %a, i32 i
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_4:
-; VI-NEXT: s_branch .LBB11_2
;
; GFX9-LABEL: bitcast_v3i16_to_v3f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -1803,17 +1738,15 @@ define inreg <3 x half> @bitcast_v3i16_to_v3f16_scalar(<3 x i16> inreg %a, i32 i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
index 0e75a35879156..3b03c8a15fe18 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
@@ -148,7 +148,7 @@ define inreg <16 x float> @bitcast_v16i32_to_v16f32_scalar(<16 x i32> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s6, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s7, v0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -186,8 +186,6 @@ define inreg <16 x float> @bitcast_v16i32_to_v16f32_scalar(<16 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v14, s7
; SI-NEXT: v_mov_b32_e32 v15, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v16i32_to_v16f32_scalar:
; VI: ; %bb.0:
@@ -196,7 +194,7 @@ define inreg <16 x float> @bitcast_v16i32_to_v16f32_scalar(<16 x i32> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -234,8 +232,6 @@ define inreg <16 x float> @bitcast_v16i32_to_v16f32_scalar(<16 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v14, s7
; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v16i32_to_v16f32_scalar:
; GFX9: ; %bb.0:
@@ -244,7 +240,7 @@ define inreg <16 x float> @bitcast_v16i32_to_v16f32_scalar(<16 x i32> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -282,15 +278,13 @@ define inreg <16 x float> @bitcast_v16i32_to_v16f32_scalar(<16 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v14, s7
; GFX9-NEXT: v_mov_b32_e32 v15, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v16i32_to_v16f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -322,8 +316,6 @@ define inreg <16 x float> @bitcast_v16i32_to_v16f32_scalar(<16 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -481,9 +473,9 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s31, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s30, v0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v15, s31, 1.0
; SI-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -501,10 +493,8 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v2, s18, 1.0
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
-; SI-NEXT: s_branch .LBB3_5
+; SI-NEXT: s_branch .LBB3_4
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -521,7 +511,7 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: v_mov_b32_e32 v14, s30
; SI-NEXT: v_mov_b32_e32 v15, s31
-; SI-NEXT: .LBB3_5: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: v_readlane_b32 s31, v16, 1
; SI-NEXT: v_readlane_b32 s30, v16, 0
; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -542,9 +532,9 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v15, s31, 1.0
; VI-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -562,10 +552,8 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
-; VI-NEXT: s_branch .LBB3_5
+; VI-NEXT: s_branch .LBB3_4
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -582,7 +570,7 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB3_5: ; %end
+; VI-NEXT: .LBB3_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v16, 1
; VI-NEXT: v_readlane_b32 s30, v16, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -603,9 +591,9 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v15, s31, 1.0
; GFX9-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -623,10 +611,8 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
-; GFX9-NEXT: s_branch .LBB3_5
+; GFX9-NEXT: s_branch .LBB3_4
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -643,7 +629,7 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB3_5: ; %end
+; GFX9-NEXT: .LBB3_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -661,10 +647,10 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s26, 1.0
@@ -684,8 +670,6 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -854,7 +838,7 @@ define inreg <8 x i64> @bitcast_v16i32_to_v8i64_scalar(<16 x i32> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s6, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s7, v0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB5_3
; SI-NEXT: .LBB5_2: ; %cmp.true
@@ -892,8 +876,6 @@ define inreg <8 x i64> @bitcast_v16i32_to_v8i64_scalar(<16 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v14, s7
; SI-NEXT: v_mov_b32_e32 v15, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v16i32_to_v8i64_scalar:
; VI: ; %bb.0:
@@ -902,7 +884,7 @@ define inreg <8 x i64> @bitcast_v16i32_to_v8i64_scalar(<16 x i32> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -940,8 +922,6 @@ define inreg <8 x i64> @bitcast_v16i32_to_v8i64_scalar(<16 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v14, s7
; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v16i32_to_v8i64_scalar:
; GFX9: ; %bb.0:
@@ -950,7 +930,7 @@ define inreg <8 x i64> @bitcast_v16i32_to_v8i64_scalar(<16 x i32> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -988,15 +968,13 @@ define inreg <8 x i64> @bitcast_v16i32_to_v8i64_scalar(<16 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v14, s7
; GFX9-NEXT: v_mov_b32_e32 v15, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v16i32_to_v8i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -1028,8 +1006,6 @@ define inreg <8 x i64> @bitcast_v16i32_to_v8i64_scalar(<16 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1193,7 +1169,7 @@ define inreg <16 x i32> @bitcast_v8i64_to_v16i32_scalar(<8 x i64> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s6, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s7, v0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB7_3
; SI-NEXT: .LBB7_2: ; %cmp.true
@@ -1231,8 +1207,6 @@ define inreg <16 x i32> @bitcast_v8i64_to_v16i32_scalar(<8 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v14, s7
; SI-NEXT: v_mov_b32_e32 v15, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v8i64_to_v16i32_scalar:
; VI: ; %bb.0:
@@ -1241,7 +1215,7 @@ define inreg <16 x i32> @bitcast_v8i64_to_v16i32_scalar(<8 x i64> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -1279,8 +1253,6 @@ define inreg <16 x i32> @bitcast_v8i64_to_v16i32_scalar(<8 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v14, s7
; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v8i64_to_v16i32_scalar:
; GFX9: ; %bb.0:
@@ -1289,7 +1261,7 @@ define inreg <16 x i32> @bitcast_v8i64_to_v16i32_scalar(<8 x i64> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
@@ -1327,15 +1299,13 @@ define inreg <16 x i32> @bitcast_v8i64_to_v16i32_scalar(<8 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v14, s7
; GFX9-NEXT: v_mov_b32_e32 v15, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v8i64_to_v16i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
@@ -1367,8 +1337,6 @@ define inreg <16 x i32> @bitcast_v8i64_to_v16i32_scalar(<8 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1528,7 +1496,7 @@ define inreg <8 x double> @bitcast_v16i32_to_v8f64_scalar(<16 x i32> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s6, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s7, v0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB9_3
; SI-NEXT: .LBB9_2: ; %cmp.true
@@ -1566,8 +1534,6 @@ define inreg <8 x double> @bitcast_v16i32_to_v8f64_scalar(<16 x i32> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v14, s7
; SI-NEXT: v_mov_b32_e32 v15, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v16i32_to_v8f64_scalar:
; VI: ; %bb.0:
@@ -1576,7 +1542,7 @@ define inreg <8 x double> @bitcast_v16i32_to_v8f64_scalar(<16 x i32> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -1614,8 +1580,6 @@ define inreg <8 x double> @bitcast_v16i32_to_v8f64_scalar(<16 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v14, s7
; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v16i32_to_v8f64_scalar:
; GFX9: ; %bb.0:
@@ -1624,7 +1588,7 @@ define inreg <8 x double> @bitcast_v16i32_to_v8f64_scalar(<16 x i32> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -1662,15 +1626,13 @@ define inreg <8 x double> @bitcast_v16i32_to_v8f64_scalar(<16 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v14, s7
; GFX9-NEXT: v_mov_b32_e32 v15, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v16i32_to_v8f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -1702,8 +1664,6 @@ define inreg <8 x double> @bitcast_v16i32_to_v8f64_scalar(<16 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1837,9 +1797,9 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s31, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s30, v0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
; SI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -1849,10 +1809,8 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
-; SI-NEXT: s_branch .LBB11_5
+; SI-NEXT: s_branch .LBB11_4
; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -1869,7 +1827,7 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: v_mov_b32_e32 v14, s30
; SI-NEXT: v_mov_b32_e32 v15, s31
-; SI-NEXT: .LBB11_5: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: v_readlane_b32 s31, v16, 1
; SI-NEXT: v_readlane_b32 s30, v16, 0
; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -1890,9 +1848,9 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -1902,10 +1860,8 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
-; VI-NEXT: s_branch .LBB11_5
+; VI-NEXT: s_branch .LBB11_4
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1922,7 +1878,7 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB11_5: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v16, 1
; VI-NEXT: v_readlane_b32 s30, v16, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -1943,9 +1899,9 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -1955,10 +1911,8 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
-; GFX9-NEXT: s_branch .LBB11_5
+; GFX9-NEXT: s_branch .LBB11_4
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1975,7 +1929,7 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB11_5: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -1993,10 +1947,10 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
@@ -2008,8 +1962,6 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -2277,7 +2229,7 @@ define inreg <32 x i16> @bitcast_v16i32_to_v32i16_scalar(<16 x i32> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s5, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v0
-; SI-NEXT: s_cbranch_scc0 .LBB13_4
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s56, s5, 16
; SI-NEXT: s_lshr_b32 s57, s29, 16
@@ -2395,24 +2347,6 @@ define inreg <32 x i16> @bitcast_v16i32_to_v32i16_scalar(<16 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v14, s4
; SI-NEXT: v_mov_b32_e32 v15, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB13_4:
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: s_branch .LBB13_2
;
; VI-LABEL: bitcast_v16i32_to_v32i16_scalar:
; VI: ; %bb.0:
@@ -2421,7 +2355,7 @@ define inreg <32 x i16> @bitcast_v16i32_to_v32i16_scalar(<16 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
@@ -2459,8 +2393,6 @@ define inreg <32 x i16> @bitcast_v16i32_to_v32i16_scalar(<16 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v14, s7
; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v16i32_to_v32i16_scalar:
; GFX9: ; %bb.0:
@@ -2469,7 +2401,7 @@ define inreg <32 x i16> @bitcast_v16i32_to_v32i16_scalar(<16 x i32> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
@@ -2507,15 +2439,13 @@ define inreg <32 x i16> @bitcast_v16i32_to_v32i16_scalar(<16 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v14, s7
; GFX9-NEXT: v_mov_b32_e32 v15, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v16i32_to_v32i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
@@ -2547,8 +2477,6 @@ define inreg <32 x i16> @bitcast_v16i32_to_v32i16_scalar(<16 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2938,7 +2866,7 @@ define inreg <16 x i32> @bitcast_v32i16_to_v16i32_scalar(<32 x i16> inreg %a, i3
; SI-NEXT: v_writelane_b32 v16, s50, 6
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v16, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s63, 16
@@ -3100,9 +3028,6 @@ define inreg <16 x i32> @bitcast_v32i16_to_v16i32_scalar(<32 x i16> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v32i16_to_v16i32_scalar:
; VI: ; %bb.0:
@@ -3111,7 +3036,7 @@ define inreg <16 x i32> @bitcast_v32i16_to_v16i32_scalar(<32 x i16> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB15_3
; VI-NEXT: .LBB15_2: ; %cmp.true
@@ -3213,8 +3138,6 @@ define inreg <16 x i32> @bitcast_v32i16_to_v16i32_scalar(<32 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v14, s7
; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v32i16_to_v16i32_scalar:
; GFX9: ; %bb.0:
@@ -3228,9 +3151,9 @@ define inreg <16 x i32> @bitcast_v32i16_to_v16i32_scalar(<32 x i16> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, s31, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, s30, 3 op_sel_hi:[1,0]
@@ -3248,10 +3171,8 @@ define inreg <16 x i32> @bitcast_v32i16_to_v16i32_scalar(<32 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB15_5
+; GFX9-NEXT: s_branch .LBB15_4
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3268,7 +3189,7 @@ define inreg <16 x i32> @bitcast_v32i16_to_v16i32_scalar(<32 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB15_5: ; %end
+; GFX9-NEXT: .LBB15_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -3286,10 +3207,10 @@ define inreg <16 x i32> @bitcast_v32i16_to_v16i32_scalar(<32 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
@@ -3309,8 +3230,6 @@ define inreg <16 x i32> @bitcast_v32i16_to_v16i32_scalar(<32 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -3578,7 +3497,7 @@ define inreg <32 x half> @bitcast_v16i32_to_v32f16_scalar(<16 x i32> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s5, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v0
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s56, s5, 16
; SI-NEXT: s_lshr_b32 s57, s29, 16
@@ -3696,24 +3615,6 @@ define inreg <32 x half> @bitcast_v16i32_to_v32f16_scalar(<16 x i32> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v14, s4
; SI-NEXT: v_mov_b32_e32 v15, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_v16i32_to_v32f16_scalar:
; VI: ; %bb.0:
@@ -3722,7 +3623,7 @@ define inreg <32 x half> @bitcast_v16i32_to_v32f16_scalar(<16 x i32> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
@@ -3760,8 +3661,6 @@ define inreg <32 x half> @bitcast_v16i32_to_v32f16_scalar(<16 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v14, s7
; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v16i32_to_v32f16_scalar:
; GFX9: ; %bb.0:
@@ -3770,7 +3669,7 @@ define inreg <32 x half> @bitcast_v16i32_to_v32f16_scalar(<16 x i32> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
@@ -3808,15 +3707,13 @@ define inreg <32 x half> @bitcast_v16i32_to_v32f16_scalar(<16 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v14, s7
; GFX9-NEXT: v_mov_b32_e32 v15, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v16i32_to_v32f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
@@ -3848,8 +3745,6 @@ define inreg <32 x half> @bitcast_v16i32_to_v32f16_scalar(<16 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4303,7 +4198,7 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; SI-NEXT: v_writelane_b32 v18, s50, 6
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v18, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s63, 16
@@ -4353,7 +4248,7 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s51, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s63
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -4483,11 +4378,8 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v16
; SI-NEXT: v_or_b32_e32 v15, v17, v15
-; SI-NEXT: s_branch .LBB19_5
+; SI-NEXT: s_branch .LBB19_4
; SI-NEXT: .LBB19_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -4504,7 +4396,7 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v13, s49
; SI-NEXT: v_mov_b32_e32 v14, s50
; SI-NEXT: v_mov_b32_e32 v15, s51
-; SI-NEXT: .LBB19_5: ; %end
+; SI-NEXT: .LBB19_4: ; %end
; SI-NEXT: v_readlane_b32 s51, v18, 7
; SI-NEXT: v_readlane_b32 s50, v18, 6
; SI-NEXT: v_readlane_b32 s49, v18, 5
@@ -4531,9 +4423,9 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
+; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s31, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -4616,10 +4508,8 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; VI-NEXT: v_add_f16_sdwa v16, v16, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v16
-; VI-NEXT: s_branch .LBB19_5
+; VI-NEXT: s_branch .LBB19_4
; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
-; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -4636,7 +4526,7 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB19_5: ; %end
+; VI-NEXT: .LBB19_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v17, 1
; VI-NEXT: v_readlane_b32 s30, v17, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -4657,9 +4547,9 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, s31, v0 op_sel_hi:[1,0]
@@ -4678,10 +4568,8 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB19_5
+; GFX9-NEXT: s_branch .LBB19_4
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -4698,7 +4586,7 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB19_5: ; %end
+; GFX9-NEXT: .LBB19_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -4716,10 +4604,10 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
@@ -4739,8 +4627,6 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -5088,7 +4974,7 @@ define inreg <32 x bfloat> @bitcast_v16i32_to_v32bf16_scalar(<16 x i32> inreg %a
; SI-NEXT: v_readfirstlane_b32 s78, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s79, v0
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s7, s78, 0xffff0000
; SI-NEXT: s_lshl_b32 s6, s78, 16
@@ -5238,40 +5124,6 @@ define inreg <32 x bfloat> @bitcast_v16i32_to_v32bf16_scalar(<16 x i32> inreg %a
; SI-NEXT: v_mul_f32_e64 v15, 1.0, s6
; SI-NEXT: v_lshr_b64 v[15:16], v[15:16], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_v16i32_to_v32bf16_scalar:
; VI: ; %bb.0:
@@ -5280,7 +5132,7 @@ define inreg <32 x bfloat> @bitcast_v16i32_to_v32bf16_scalar(<16 x i32> inreg %a
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
@@ -5318,8 +5170,6 @@ define inreg <32 x bfloat> @bitcast_v16i32_to_v32bf16_scalar(<16 x i32> inreg %a
; VI-NEXT: v_mov_b32_e32 v14, s7
; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v16i32_to_v32bf16_scalar:
; GFX9: ; %bb.0:
@@ -5328,7 +5178,7 @@ define inreg <32 x bfloat> @bitcast_v16i32_to_v32bf16_scalar(<16 x i32> inreg %a
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
@@ -5366,15 +5216,13 @@ define inreg <32 x bfloat> @bitcast_v16i32_to_v32bf16_scalar(<16 x i32> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v14, s7
; GFX9-NEXT: v_mov_b32_e32 v15, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_v16i32_to_v32bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
@@ -5406,8 +5254,6 @@ define inreg <32 x bfloat> @bitcast_v16i32_to_v32bf16_scalar(<16 x i32> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_4:
-; GFX11-NEXT: s_branch .LBB21_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6920,7 +6766,7 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; SI-NEXT: v_mul_f32_e64 v21, 1.0, s5
; SI-NEXT: v_mul_f32_e64 v19, 1.0, s44
; SI-NEXT: v_mul_f32_e64 v17, 1.0, s42
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v41
; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v63
@@ -7073,9 +6919,6 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v32bf16_to_v16i32_scalar:
; VI: ; %bb.0:
@@ -7089,9 +6932,9 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
+; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v16, 0x40c00000
; VI-NEXT: s_lshl_b32 s4, s30, 16
@@ -7390,10 +7233,8 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; VI-NEXT: v_lshrrev_b64 v[16:17], 16, v[17:18]
; VI-NEXT: v_mov_b32_e32 v13, v15
; VI-NEXT: v_mov_b32_e32 v15, v16
-; VI-NEXT: s_branch .LBB23_5
+; VI-NEXT: s_branch .LBB23_4
; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
-; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -7410,7 +7251,7 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB23_5: ; %end
+; VI-NEXT: .LBB23_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v20, 1
; VI-NEXT: v_readlane_b32 s30, v20, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -7431,9 +7272,9 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
+; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s31, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -7741,10 +7582,8 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; GFX9-NEXT: v_cndmask_b32_e32 v17, v18, v19, vcc
; GFX9-NEXT: v_and_b32_sdwa v16, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v16
-; GFX9-NEXT: s_branch .LBB23_5
+; GFX9-NEXT: s_branch .LBB23_4
; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
-; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -7761,7 +7600,7 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB23_5: ; %end
+; GFX9-NEXT: .LBB23_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v20, 1
; GFX9-NEXT: v_readlane_b32 s30, v20, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -7779,10 +7618,10 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB23_3
; GFX11-TRUE16-NEXT: .LBB23_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s0, s27, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s27, 16
@@ -8094,8 +7933,6 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v17.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB23_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB23_2
-; GFX11-TRUE16-NEXT: .LBB23_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -8115,10 +7952,10 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB23_3
; GFX11-FAKE16-NEXT: .LBB23_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_and_b32 s1, s27, 0xffff0000
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s27, 16
@@ -8449,8 +8286,6 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v20, 16, v17
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB23_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB23_2
-; GFX11-FAKE16-NEXT: .LBB23_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -9986,7 +9821,7 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v1
; SI-NEXT: v_writelane_b32 v4, s85, 29
-; SI-NEXT: s_cbranch_scc0 .LBB25_4
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s38, s5, 24
; SI-NEXT: s_lshr_b32 s39, s5, 16
@@ -10342,56 +10177,6 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB25_4:
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr85
-; SI-NEXT: ; implicit-def: $sgpr84
-; SI-NEXT: ; implicit-def: $sgpr83
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr82
-; SI-NEXT: ; implicit-def: $sgpr81
-; SI-NEXT: ; implicit-def: $sgpr80
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr71
-; SI-NEXT: ; implicit-def: $sgpr70
-; SI-NEXT: ; implicit-def: $sgpr69
-; SI-NEXT: ; implicit-def: $sgpr68
-; SI-NEXT: ; implicit-def: $sgpr67
-; SI-NEXT: ; implicit-def: $sgpr66
-; SI-NEXT: ; implicit-def: $sgpr65
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB25_2
;
; VI-LABEL: bitcast_v16i32_to_v64i8_scalar:
; VI: ; %bb.0:
@@ -10423,7 +10208,7 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s4, v1
; VI-NEXT: v_writelane_b32 v4, s67, 19
-; VI-NEXT: s_cbranch_scc0 .LBB25_4
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s56, s5, 24
; VI-NEXT: s_lshr_b32 s57, s5, 16
@@ -10693,56 +10478,6 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_4:
-; VI-NEXT: ; implicit-def: $sgpr67
-; VI-NEXT: ; implicit-def: $sgpr66
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr65
-; VI-NEXT: ; implicit-def: $sgpr64
-; VI-NEXT: ; implicit-def: $sgpr55
-; VI-NEXT: ; implicit-def: $sgpr54
-; VI-NEXT: ; implicit-def: $sgpr53
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr52
-; VI-NEXT: ; implicit-def: $sgpr51
-; VI-NEXT: ; implicit-def: $sgpr50
-; VI-NEXT: ; implicit-def: $sgpr49
-; VI-NEXT: ; implicit-def: $sgpr48
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr39
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr37
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: s_branch .LBB25_2
;
; GFX9-LABEL: bitcast_v16i32_to_v64i8_scalar:
; GFX9: ; %bb.0:
@@ -10770,7 +10505,7 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
; GFX9-NEXT: v_writelane_b32 v4, s55, 15
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s56, s5, 24
; GFX9-NEXT: s_lshr_b32 s57, s5, 16
@@ -11021,56 +10756,6 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_4:
-; GFX9-NEXT: ; implicit-def: $sgpr55
-; GFX9-NEXT: ; implicit-def: $sgpr54
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr53
-; GFX9-NEXT: ; implicit-def: $sgpr52
-; GFX9-NEXT: ; implicit-def: $sgpr51
-; GFX9-NEXT: ; implicit-def: $sgpr50
-; GFX9-NEXT: ; implicit-def: $sgpr49
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr48
-; GFX9-NEXT: ; implicit-def: $sgpr39
-; GFX9-NEXT: ; implicit-def: $sgpr38
-; GFX9-NEXT: ; implicit-def: $sgpr37
-; GFX9-NEXT: ; implicit-def: $sgpr36
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr35
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr31
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: s_branch .LBB25_2
;
; GFX11-LABEL: bitcast_v16i32_to_v64i8_scalar:
; GFX11: ; %bb.0:
@@ -11089,7 +10774,7 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; GFX11-NEXT: v_writelane_b32 v23, s38, 6
; GFX11-NEXT: v_writelane_b32 v23, s39, 7
; GFX11-NEXT: v_writelane_b32 v23, s48, 8
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s42, s27, 24
; GFX11-NEXT: s_lshr_b32 s44, s27, 16
@@ -11292,56 +10977,6 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; GFX11-NEXT: s_mov_b32 exec_lo, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB25_4:
-; GFX11-NEXT: ; implicit-def: $sgpr48
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr37
-; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr35
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr31
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr95
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr91
-; GFX11-NEXT: ; implicit-def: $sgpr93
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr89
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr79
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr77
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr75
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: s_branch .LBB25_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13709,7 +13344,7 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v37
; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v27, 8, v27
-; SI-NEXT: s_cbranch_scc0 .LBB27_4
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s6, 0xff
; SI-NEXT: v_or_b32_e32 v0, s4, v34
@@ -14073,9 +13708,6 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: .LBB27_3: ; %end
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB27_2
;
; VI-LABEL: bitcast_v64i8_to_v16i32_scalar:
; VI: ; %bb.0:
@@ -14134,7 +13766,7 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v35
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -14384,9 +14016,6 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: .LBB27_3: ; %end
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v64i8_to_v16i32_scalar:
; GFX9: ; %bb.0:
@@ -14445,7 +14074,7 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: s_waitcnt vmcnt(19)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v35
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -14684,9 +14313,6 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: .LBB27_3: ; %end
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB27_2
;
; GFX11-TRUE16-LABEL: bitcast_v64i8_to_v16i32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -14743,7 +14369,7 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v54
; GFX11-TRUE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; GFX11-TRUE16-NEXT: v_perm_b32 v16, v37, v34, 0xc0c0004
@@ -14968,9 +14594,6 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: .LBB27_3: ; %end
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB27_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB27_2
;
; GFX11-FAKE16-LABEL: bitcast_v64i8_to_v16i32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -15027,7 +14650,7 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v54
; GFX11-FAKE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; GFX11-FAKE16-NEXT: v_perm_b32 v16, v37, v34, 0xc0c0004
@@ -15252,9 +14875,6 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: .LBB27_3: ; %end
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB27_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB27_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15412,9 +15032,9 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s31, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s30, v0
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB29_4
+; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v15, s31, 1.0
; SI-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -15432,10 +15052,8 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; SI-NEXT: v_add_f32_e64 v2, s18, 1.0
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
-; SI-NEXT: s_branch .LBB29_5
+; SI-NEXT: s_branch .LBB29_4
; SI-NEXT: .LBB29_3:
-; SI-NEXT: s_branch .LBB29_2
-; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -15452,7 +15070,7 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: v_mov_b32_e32 v14, s30
; SI-NEXT: v_mov_b32_e32 v15, s31
-; SI-NEXT: .LBB29_5: ; %end
+; SI-NEXT: .LBB29_4: ; %end
; SI-NEXT: v_readlane_b32 s31, v16, 1
; SI-NEXT: v_readlane_b32 s30, v16, 0
; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -15473,9 +15091,9 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
+; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v15, s31, 1.0
; VI-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -15493,10 +15111,8 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
-; VI-NEXT: s_branch .LBB29_5
+; VI-NEXT: s_branch .LBB29_4
; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
-; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -15513,7 +15129,7 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB29_5: ; %end
+; VI-NEXT: .LBB29_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v16, 1
; VI-NEXT: v_readlane_b32 s30, v16, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -15534,9 +15150,9 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
+; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v15, s31, 1.0
; GFX9-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -15554,10 +15170,8 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
-; GFX9-NEXT: s_branch .LBB29_5
+; GFX9-NEXT: s_branch .LBB29_4
; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
-; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -15574,7 +15188,7 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB29_5: ; %end
+; GFX9-NEXT: .LBB29_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -15592,10 +15206,10 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-NEXT: .LBB29_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s26, 1.0
@@ -15615,8 +15229,6 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
-; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -15789,7 +15401,7 @@ define inreg <16 x float> @bitcast_v8i64_to_v16f32_scalar(<8 x i64> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s6, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s7, v0
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB31_3
; SI-NEXT: .LBB31_2: ; %cmp.true
@@ -15827,8 +15439,6 @@ define inreg <16 x float> @bitcast_v8i64_to_v16f32_scalar(<8 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v14, s7
; SI-NEXT: v_mov_b32_e32 v15, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v8i64_to_v16f32_scalar:
; VI: ; %bb.0:
@@ -15837,7 +15447,7 @@ define inreg <16 x float> @bitcast_v8i64_to_v16f32_scalar(<8 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB31_3
; VI-NEXT: .LBB31_2: ; %cmp.true
@@ -15875,8 +15485,6 @@ define inreg <16 x float> @bitcast_v8i64_to_v16f32_scalar(<8 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v14, s7
; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v8i64_to_v16f32_scalar:
; GFX9: ; %bb.0:
@@ -15885,7 +15493,7 @@ define inreg <16 x float> @bitcast_v8i64_to_v16f32_scalar(<8 x i64> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
@@ -15923,15 +15531,13 @@ define inreg <16 x float> @bitcast_v8i64_to_v16f32_scalar(<8 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v14, s7
; GFX9-NEXT: v_mov_b32_e32 v15, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB31_4:
-; GFX9-NEXT: s_branch .LBB31_2
;
; GFX11-LABEL: bitcast_v8i64_to_v16f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
@@ -15963,8 +15569,6 @@ define inreg <16 x float> @bitcast_v8i64_to_v16f32_scalar(<8 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB31_4:
-; GFX11-NEXT: s_branch .LBB31_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16122,9 +15726,9 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s31, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s30, v0
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB33_4
+; SI-NEXT: s_cbranch_execnz .LBB33_3
; SI-NEXT: .LBB33_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v15, s31, 1.0
; SI-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -16142,10 +15746,8 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v2, s18, 1.0
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
-; SI-NEXT: s_branch .LBB33_5
+; SI-NEXT: s_branch .LBB33_4
; SI-NEXT: .LBB33_3:
-; SI-NEXT: s_branch .LBB33_2
-; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -16162,7 +15764,7 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: v_mov_b32_e32 v14, s30
; SI-NEXT: v_mov_b32_e32 v15, s31
-; SI-NEXT: .LBB33_5: ; %end
+; SI-NEXT: .LBB33_4: ; %end
; SI-NEXT: v_readlane_b32 s31, v16, 1
; SI-NEXT: v_readlane_b32 s30, v16, 0
; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -16183,9 +15785,9 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_4
+; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v15, s31, 1.0
; VI-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -16203,10 +15805,8 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
-; VI-NEXT: s_branch .LBB33_5
+; VI-NEXT: s_branch .LBB33_4
; VI-NEXT: .LBB33_3:
-; VI-NEXT: s_branch .LBB33_2
-; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -16223,7 +15823,7 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB33_5: ; %end
+; VI-NEXT: .LBB33_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v16, 1
; VI-NEXT: v_readlane_b32 s30, v16, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -16244,9 +15844,9 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
+; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v15, s31, 1.0
; GFX9-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -16264,10 +15864,8 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
-; GFX9-NEXT: s_branch .LBB33_5
+; GFX9-NEXT: s_branch .LBB33_4
; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: s_branch .LBB33_2
-; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -16284,7 +15882,7 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB33_5: ; %end
+; GFX9-NEXT: .LBB33_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -16302,10 +15900,10 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s26, 1.0
@@ -16325,8 +15923,6 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
-; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -16469,9 +16065,9 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s31, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s30, v0
-; SI-NEXT: s_cbranch_scc0 .LBB35_3
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB35_4
+; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
; SI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -16481,10 +16077,8 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
-; SI-NEXT: s_branch .LBB35_5
+; SI-NEXT: s_branch .LBB35_4
; SI-NEXT: .LBB35_3:
-; SI-NEXT: s_branch .LBB35_2
-; SI-NEXT: .LBB35_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -16501,7 +16095,7 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: v_mov_b32_e32 v14, s30
; SI-NEXT: v_mov_b32_e32 v15, s31
-; SI-NEXT: .LBB35_5: ; %end
+; SI-NEXT: .LBB35_4: ; %end
; SI-NEXT: v_readlane_b32 s31, v16, 1
; SI-NEXT: v_readlane_b32 s30, v16, 0
; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -16522,9 +16116,9 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_4
+; VI-NEXT: s_cbranch_execnz .LBB35_3
; VI-NEXT: .LBB35_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -16534,10 +16128,8 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
-; VI-NEXT: s_branch .LBB35_5
+; VI-NEXT: s_branch .LBB35_4
; VI-NEXT: .LBB35_3:
-; VI-NEXT: s_branch .LBB35_2
-; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -16554,7 +16146,7 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB35_5: ; %end
+; VI-NEXT: .LBB35_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v16, 1
; VI-NEXT: v_readlane_b32 s30, v16, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -16575,9 +16167,9 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
+; GFX9-NEXT: s_cbranch_execnz .LBB35_3
; GFX9-NEXT: .LBB35_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -16587,10 +16179,8 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
-; GFX9-NEXT: s_branch .LBB35_5
+; GFX9-NEXT: s_branch .LBB35_4
; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
-; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -16607,7 +16197,7 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB35_5: ; %end
+; GFX9-NEXT: .LBB35_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -16625,10 +16215,10 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-NEXT: .LBB35_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
@@ -16640,8 +16230,6 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
-; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -16901,7 +16489,7 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s5, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v0
-; SI-NEXT: s_cbranch_scc0 .LBB37_3
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s63, s5, 16
; SI-NEXT: s_lshr_b32 s62, s29, 16
@@ -16919,7 +16507,7 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB37_4
+; SI-NEXT: s_cbranch_execnz .LBB37_3
; SI-NEXT: .LBB37_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v15, s5, 1.0
; SI-NEXT: v_add_f32_e64 v14, s4, 1.0
@@ -16953,26 +16541,8 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v1
-; SI-NEXT: s_branch .LBB37_5
+; SI-NEXT: s_branch .LBB37_4
; SI-NEXT: .LBB37_3:
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: s_branch .LBB37_2
-; SI-NEXT: .LBB37_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -17005,7 +16575,7 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v21, s40
; SI-NEXT: v_mov_b32_e32 v22, s42
; SI-NEXT: v_mov_b32_e32 v23, s44
-; SI-NEXT: .LBB37_5: ; %end
+; SI-NEXT: .LBB37_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
@@ -17068,9 +16638,9 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB37_3
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_4
+; VI-NEXT: s_cbranch_execnz .LBB37_3
; VI-NEXT: .LBB37_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v15, s31, 1.0
; VI-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -17088,10 +16658,8 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
-; VI-NEXT: s_branch .LBB37_5
+; VI-NEXT: s_branch .LBB37_4
; VI-NEXT: .LBB37_3:
-; VI-NEXT: s_branch .LBB37_2
-; VI-NEXT: .LBB37_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -17108,7 +16676,7 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB37_5: ; %end
+; VI-NEXT: .LBB37_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v16, 1
; VI-NEXT: v_readlane_b32 s30, v16, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -17129,9 +16697,9 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_4
+; GFX9-NEXT: s_cbranch_execnz .LBB37_3
; GFX9-NEXT: .LBB37_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v15, s31, 1.0
; GFX9-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -17149,10 +16717,8 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
-; GFX9-NEXT: s_branch .LBB37_5
+; GFX9-NEXT: s_branch .LBB37_4
; GFX9-NEXT: .LBB37_3:
-; GFX9-NEXT: s_branch .LBB37_2
-; GFX9-NEXT: .LBB37_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -17169,7 +16735,7 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB37_5: ; %end
+; GFX9-NEXT: .LBB37_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -17187,10 +16753,10 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
; GFX11-NEXT: .LBB37_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s26, 1.0
@@ -17210,8 +16776,6 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: s_branch .LBB37_2
-; GFX11-NEXT: .LBB37_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -17610,7 +17174,7 @@ define inreg <16 x float> @bitcast_v32i16_to_v16f32_scalar(<32 x i16> inreg %a,
; SI-NEXT: v_writelane_b32 v16, s50, 6
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v16, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB39_4
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s63, 16
@@ -17772,9 +17336,6 @@ define inreg <16 x float> @bitcast_v32i16_to_v16f32_scalar(<32 x i16> inreg %a,
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB39_2
;
; VI-LABEL: bitcast_v32i16_to_v16f32_scalar:
; VI: ; %bb.0:
@@ -17783,7 +17344,7 @@ define inreg <16 x float> @bitcast_v32i16_to_v16f32_scalar(<32 x i16> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB39_3
; VI-NEXT: .LBB39_2: ; %cmp.true
@@ -17885,8 +17446,6 @@ define inreg <16 x float> @bitcast_v32i16_to_v16f32_scalar(<32 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v14, s7
; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v32i16_to_v16f32_scalar:
; GFX9: ; %bb.0:
@@ -17900,9 +17459,9 @@ define inreg <16 x float> @bitcast_v32i16_to_v16f32_scalar(<32 x i16> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
+; GFX9-NEXT: s_cbranch_execnz .LBB39_3
; GFX9-NEXT: .LBB39_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, s31, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, s30, 3 op_sel_hi:[1,0]
@@ -17920,10 +17479,8 @@ define inreg <16 x float> @bitcast_v32i16_to_v16f32_scalar(<32 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB39_5
+; GFX9-NEXT: s_branch .LBB39_4
; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
-; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -17940,7 +17497,7 @@ define inreg <16 x float> @bitcast_v32i16_to_v16f32_scalar(<32 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB39_5: ; %end
+; GFX9-NEXT: .LBB39_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -17958,10 +17515,10 @@ define inreg <16 x float> @bitcast_v32i16_to_v16f32_scalar(<32 x i16> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_3
; GFX11-NEXT: .LBB39_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
@@ -17981,8 +17538,6 @@ define inreg <16 x float> @bitcast_v32i16_to_v16f32_scalar(<32 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
-; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -18242,7 +17797,7 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s5, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v0
-; SI-NEXT: s_cbranch_scc0 .LBB41_3
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s63, s5, 16
; SI-NEXT: s_lshr_b32 s62, s29, 16
@@ -18260,7 +17815,7 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB41_4
+; SI-NEXT: s_cbranch_execnz .LBB41_3
; SI-NEXT: .LBB41_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v15, s5, 1.0
; SI-NEXT: v_add_f32_e64 v14, s4, 1.0
@@ -18294,26 +17849,8 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v1
-; SI-NEXT: s_branch .LBB41_5
+; SI-NEXT: s_branch .LBB41_4
; SI-NEXT: .LBB41_3:
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: s_branch .LBB41_2
-; SI-NEXT: .LBB41_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -18346,7 +17883,7 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v21, s40
; SI-NEXT: v_mov_b32_e32 v22, s42
; SI-NEXT: v_mov_b32_e32 v23, s44
-; SI-NEXT: .LBB41_5: ; %end
+; SI-NEXT: .LBB41_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
@@ -18409,9 +17946,9 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB41_3
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB41_4
+; VI-NEXT: s_cbranch_execnz .LBB41_3
; VI-NEXT: .LBB41_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v15, s31, 1.0
; VI-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -18429,10 +17966,8 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
-; VI-NEXT: s_branch .LBB41_5
+; VI-NEXT: s_branch .LBB41_4
; VI-NEXT: .LBB41_3:
-; VI-NEXT: s_branch .LBB41_2
-; VI-NEXT: .LBB41_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -18449,7 +17984,7 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB41_5: ; %end
+; VI-NEXT: .LBB41_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v16, 1
; VI-NEXT: v_readlane_b32 s30, v16, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -18470,9 +18005,9 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB41_4
+; GFX9-NEXT: s_cbranch_execnz .LBB41_3
; GFX9-NEXT: .LBB41_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v15, s31, 1.0
; GFX9-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -18490,10 +18025,8 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
-; GFX9-NEXT: s_branch .LBB41_5
+; GFX9-NEXT: s_branch .LBB41_4
; GFX9-NEXT: .LBB41_3:
-; GFX9-NEXT: s_branch .LBB41_2
-; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -18510,7 +18043,7 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB41_5: ; %end
+; GFX9-NEXT: .LBB41_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -18528,10 +18061,10 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
; GFX11-NEXT: .LBB41_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s26, 1.0
@@ -18551,8 +18084,6 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB41_3:
-; GFX11-NEXT: s_branch .LBB41_2
-; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -19015,7 +18546,7 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; SI-NEXT: v_writelane_b32 v18, s50, 6
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v18, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB43_3
+; SI-NEXT: s_cbranch_scc0 .LBB43_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s63, 16
@@ -19065,7 +18596,7 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s51, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB43_4
+; SI-NEXT: s_cbranch_execnz .LBB43_3
; SI-NEXT: .LBB43_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s63
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -19195,11 +18726,8 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v16
; SI-NEXT: v_or_b32_e32 v15, v17, v15
-; SI-NEXT: s_branch .LBB43_5
+; SI-NEXT: s_branch .LBB43_4
; SI-NEXT: .LBB43_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB43_2
-; SI-NEXT: .LBB43_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -19216,7 +18744,7 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v13, s49
; SI-NEXT: v_mov_b32_e32 v14, s50
; SI-NEXT: v_mov_b32_e32 v15, s51
-; SI-NEXT: .LBB43_5: ; %end
+; SI-NEXT: .LBB43_4: ; %end
; SI-NEXT: v_readlane_b32 s51, v18, 7
; SI-NEXT: v_readlane_b32 s50, v18, 6
; SI-NEXT: v_readlane_b32 s49, v18, 5
@@ -19243,9 +18771,9 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB43_3
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB43_4
+; VI-NEXT: s_cbranch_execnz .LBB43_3
; VI-NEXT: .LBB43_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s31, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -19328,10 +18856,8 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; VI-NEXT: v_add_f16_sdwa v16, v16, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v16
-; VI-NEXT: s_branch .LBB43_5
+; VI-NEXT: s_branch .LBB43_4
; VI-NEXT: .LBB43_3:
-; VI-NEXT: s_branch .LBB43_2
-; VI-NEXT: .LBB43_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -19348,7 +18874,7 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB43_5: ; %end
+; VI-NEXT: .LBB43_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v17, 1
; VI-NEXT: v_readlane_b32 s30, v17, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -19369,9 +18895,9 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
+; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, s31, v0 op_sel_hi:[1,0]
@@ -19390,10 +18916,8 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB43_5
+; GFX9-NEXT: s_branch .LBB43_4
; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
-; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -19410,7 +18934,7 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB43_5: ; %end
+; GFX9-NEXT: .LBB43_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -19428,10 +18952,10 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-NEXT: .LBB43_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
@@ -19451,8 +18975,6 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
-; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -19792,7 +19314,7 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; SI-NEXT: v_readfirstlane_b32 s6, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s7, v0
-; SI-NEXT: s_cbranch_scc0 .LBB45_3
+; SI-NEXT: s_cbranch_scc0 .LBB45_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s8, s6, 0xffff0000
; SI-NEXT: s_lshl_b32 s9, s6, 16
@@ -19826,7 +19348,7 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; SI-NEXT: s_lshl_b32 s77, s17, 16
; SI-NEXT: s_and_b32 s78, s16, 0xffff0000
; SI-NEXT: s_lshl_b32 s79, s16, 16
-; SI-NEXT: s_cbranch_execnz .LBB45_4
+; SI-NEXT: s_cbranch_execnz .LBB45_3
; SI-NEXT: .LBB45_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v20, s21, 1.0
; SI-NEXT: v_add_f32_e64 v18, s22, 1.0
@@ -19876,42 +19398,8 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28
; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v30
; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30
-; SI-NEXT: s_branch .LBB45_5
+; SI-NEXT: s_branch .LBB45_4
; SI-NEXT: .LBB45_3:
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB45_2
-; SI-NEXT: .LBB45_4:
; SI-NEXT: v_mov_b32_e32 v30, s79
; SI-NEXT: v_mov_b32_e32 v31, s78
; SI-NEXT: v_mov_b32_e32 v28, s77
@@ -19944,7 +19432,7 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; SI-NEXT: v_mov_b32_e32 v3, s10
; SI-NEXT: v_mov_b32_e32 v0, s9
; SI-NEXT: v_mov_b32_e32 v1, s8
-; SI-NEXT: .LBB45_5: ; %end
+; SI-NEXT: .LBB45_4: ; %end
; SI-NEXT: v_mul_f32_e32 v31, 1.0, v31
; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v31
; SI-NEXT: v_mul_f32_e32 v30, 1.0, v30
@@ -20023,9 +19511,9 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB45_3
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB45_4
+; VI-NEXT: s_cbranch_execnz .LBB45_3
; VI-NEXT: .LBB45_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v15, s31, 1.0
; VI-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -20043,10 +19531,8 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
-; VI-NEXT: s_branch .LBB45_5
+; VI-NEXT: s_branch .LBB45_4
; VI-NEXT: .LBB45_3:
-; VI-NEXT: s_branch .LBB45_2
-; VI-NEXT: .LBB45_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -20063,7 +19549,7 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB45_5: ; %end
+; VI-NEXT: .LBB45_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v16, 1
; VI-NEXT: v_readlane_b32 s30, v16, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -20084,9 +19570,9 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB45_4
+; GFX9-NEXT: s_cbranch_execnz .LBB45_3
; GFX9-NEXT: .LBB45_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v15, s31, 1.0
; GFX9-NEXT: v_add_f32_e64 v14, s30, 1.0
@@ -20104,10 +19590,8 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
-; GFX9-NEXT: s_branch .LBB45_5
+; GFX9-NEXT: s_branch .LBB45_4
; GFX9-NEXT: .LBB45_3:
-; GFX9-NEXT: s_branch .LBB45_2
-; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -20124,7 +19608,7 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB45_5: ; %end
+; GFX9-NEXT: .LBB45_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -20142,10 +19626,10 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
; GFX11-NEXT: .LBB45_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s26, 1.0
@@ -20165,8 +19649,6 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB45_3:
-; GFX11-NEXT: s_branch .LBB45_2
-; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -21688,7 +21170,7 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; SI-NEXT: v_mul_f32_e64 v21, 1.0, s5
; SI-NEXT: v_mul_f32_e64 v19, 1.0, s44
; SI-NEXT: v_mul_f32_e64 v17, 1.0, s42
-; SI-NEXT: s_cbranch_scc0 .LBB47_4
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v41
; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v63
@@ -21841,9 +21323,6 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB47_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB47_2
;
; VI-LABEL: bitcast_v32bf16_to_v16f32_scalar:
; VI: ; %bb.0:
@@ -21857,9 +21336,9 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
+; VI-NEXT: s_cbranch_execnz .LBB47_3
; VI-NEXT: .LBB47_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v16, 0x40c00000
; VI-NEXT: s_lshl_b32 s4, s30, 16
@@ -22158,10 +21637,8 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; VI-NEXT: v_lshrrev_b64 v[16:17], 16, v[17:18]
; VI-NEXT: v_mov_b32_e32 v13, v15
; VI-NEXT: v_mov_b32_e32 v15, v16
-; VI-NEXT: s_branch .LBB47_5
+; VI-NEXT: s_branch .LBB47_4
; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
-; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -22178,7 +21655,7 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB47_5: ; %end
+; VI-NEXT: .LBB47_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v20, 1
; VI-NEXT: v_readlane_b32 s30, v20, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -22199,9 +21676,9 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
+; GFX9-NEXT: s_cbranch_execnz .LBB47_3
; GFX9-NEXT: .LBB47_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s31, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -22509,10 +21986,8 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; GFX9-NEXT: v_cndmask_b32_e32 v17, v18, v19, vcc
; GFX9-NEXT: v_and_b32_sdwa v16, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v16
-; GFX9-NEXT: s_branch .LBB47_5
+; GFX9-NEXT: s_branch .LBB47_4
; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
-; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -22529,7 +22004,7 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB47_5: ; %end
+; GFX9-NEXT: .LBB47_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v20, 1
; GFX9-NEXT: v_readlane_b32 s30, v20, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -22547,10 +22022,10 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s0, s27, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s27, 16
@@ -22862,8 +22337,6 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v17.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB47_2
-; GFX11-TRUE16-NEXT: .LBB47_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -22883,10 +22356,10 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-FAKE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_and_b32 s1, s27, 0xffff0000
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s27, 16
@@ -23217,8 +22690,6 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v20, 16, v17
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB47_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB47_2
-; GFX11-FAKE16-NEXT: .LBB47_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -24738,7 +24209,7 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s36, v1
; SI-NEXT: v_writelane_b32 v40, s85, 29
-; SI-NEXT: s_cbranch_scc0 .LBB49_3
+; SI-NEXT: s_cbranch_scc0 .LBB49_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s82, s37, 24
; SI-NEXT: s_lshr_b32 s84, s37, 16
@@ -24788,7 +24259,7 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; SI-NEXT: s_lshr_b64 s[94:95], s[16:17], 24
; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 16
; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 8
-; SI-NEXT: s_cbranch_execnz .LBB49_4
+; SI-NEXT: s_cbranch_execnz .LBB49_3
; SI-NEXT: .LBB49_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v20, s17, 1.0
; SI-NEXT: v_add_f32_e64 v22, s16, 1.0
@@ -24870,58 +24341,8 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; SI-NEXT: v_lshrrev_b32_e32 v38, 24, v20
; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v20
; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v20
-; SI-NEXT: s_branch .LBB49_5
+; SI-NEXT: s_branch .LBB49_4
; SI-NEXT: .LBB49_3:
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr66
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr69
-; SI-NEXT: ; implicit-def: $sgpr67
-; SI-NEXT: ; implicit-def: $sgpr65
-; SI-NEXT: ; implicit-def: $sgpr80
-; SI-NEXT: ; implicit-def: $sgpr70
-; SI-NEXT: ; implicit-def: $sgpr68
-; SI-NEXT: ; implicit-def: $sgpr83
-; SI-NEXT: ; implicit-def: $sgpr81
-; SI-NEXT: ; implicit-def: $sgpr71
-; SI-NEXT: ; implicit-def: $sgpr85
-; SI-NEXT: ; implicit-def: $sgpr84
-; SI-NEXT: ; implicit-def: $sgpr82
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: s_branch .LBB49_2
-; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v22, s16
; SI-NEXT: v_mov_b32_e32 v20, s17
; SI-NEXT: v_mov_b32_e32 v18, s18
@@ -24962,7 +24383,7 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v14, s85
; SI-NEXT: v_mov_b32_e32 v13, s84
; SI-NEXT: v_mov_b32_e32 v12, s82
-; SI-NEXT: .LBB49_5: ; %end
+; SI-NEXT: .LBB49_4: ; %end
; SI-NEXT: v_and_b32_e32 v22, 0xff, v22
; SI-NEXT: s_lshl_b32 s5, s34, 8
; SI-NEXT: v_or_b32_e32 v22, s5, v22
@@ -25234,7 +24655,7 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill
-; VI-NEXT: s_cbranch_scc0 .LBB49_3
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s56, s5, 24
; VI-NEXT: s_lshr_b32 s57, s5, 16
@@ -25284,7 +24705,7 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; VI-NEXT: s_lshr_b64 s[10:11], s[20:21], 24
; VI-NEXT: s_lshr_b64 s[8:9], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[6:7], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB49_4
+; VI-NEXT: s_cbranch_execnz .LBB49_3
; VI-NEXT: .LBB49_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v6, s27, 1.0
; VI-NEXT: v_add_f32_e64 v5, s26, 1.0
@@ -25354,58 +24775,8 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; VI-NEXT: v_lshrrev_b32_e32 v62, 8, v16
; VI-NEXT: v_lshrrev_b32_e32 v18, 16, v15
; VI-NEXT: v_lshrrev_b32_e32 v17, 8, v15
-; VI-NEXT: s_branch .LBB49_5
+; VI-NEXT: s_branch .LBB49_4
; VI-NEXT: .LBB49_3:
-; VI-NEXT: ; implicit-def: $sgpr67
-; VI-NEXT: ; implicit-def: $sgpr65
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr66
-; VI-NEXT: ; implicit-def: $sgpr64
-; VI-NEXT: ; implicit-def: $sgpr55
-; VI-NEXT: ; implicit-def: $sgpr54
-; VI-NEXT: ; implicit-def: $sgpr52
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr53
-; VI-NEXT: ; implicit-def: $sgpr51
-; VI-NEXT: ; implicit-def: $sgpr50
-; VI-NEXT: ; implicit-def: $sgpr49
-; VI-NEXT: ; implicit-def: $sgpr39
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr48
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr37
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: s_branch .LBB49_2
-; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v19, s44
; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
@@ -25474,7 +24845,7 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v19, s40
; VI-NEXT: v_mov_b32_e32 v20, s14
-; VI-NEXT: .LBB49_5: ; %end
+; VI-NEXT: .LBB49_4: ; %end
; VI-NEXT: s_mov_b32 s4, 0xc0c0004
; VI-NEXT: v_perm_b32 v18, v18, v24, s4
; VI-NEXT: v_perm_b32 v15, v15, v17, s4
@@ -25659,7 +25030,7 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s56, s5, 24
; GFX9-NEXT: s_lshr_b32 s57, s5, 16
@@ -25709,7 +25080,7 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; GFX9-NEXT: s_lshr_b64 s[10:11], s[20:21], 24
; GFX9-NEXT: s_lshr_b64 s[8:9], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[6:7], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
+; GFX9-NEXT: s_cbranch_execnz .LBB49_3
; GFX9-NEXT: .LBB49_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v6, s27, 1.0
; GFX9-NEXT: v_add_f32_e64 v5, s26, 1.0
@@ -25781,58 +25152,8 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; GFX9-NEXT: v_lshrrev_b32_e32 v62, 8, v19
; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v18
; GFX9-NEXT: v_lshrrev_b32_e32 v15, 8, v18
-; GFX9-NEXT: s_branch .LBB49_5
+; GFX9-NEXT: s_branch .LBB49_4
; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: ; implicit-def: $sgpr55
-; GFX9-NEXT: ; implicit-def: $sgpr53
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr54
-; GFX9-NEXT: ; implicit-def: $sgpr52
-; GFX9-NEXT: ; implicit-def: $sgpr51
-; GFX9-NEXT: ; implicit-def: $sgpr50
-; GFX9-NEXT: ; implicit-def: $sgpr48
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr49
-; GFX9-NEXT: ; implicit-def: $sgpr39
-; GFX9-NEXT: ; implicit-def: $sgpr38
-; GFX9-NEXT: ; implicit-def: $sgpr37
-; GFX9-NEXT: ; implicit-def: $sgpr35
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr36
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr31
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: s_branch .LBB49_2
-; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v20, s44
; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
@@ -25903,7 +25224,7 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v20, s40
; GFX9-NEXT: v_mov_b32_e32 v21, s14
-; GFX9-NEXT: .LBB49_5: ; %end
+; GFX9-NEXT: .LBB49_4: ; %end
; GFX9-NEXT: s_mov_b32 s4, 0xc0c0004
; GFX9-NEXT: v_perm_b32 v16, v16, v25, s4
; GFX9-NEXT: v_perm_b32 v15, v18, v15, s4
@@ -26048,7 +25369,7 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; GFX11-NEXT: v_writelane_b32 v40, s39, 7
; GFX11-NEXT: v_writelane_b32 v40, s48, 8
; GFX11-NEXT: v_writelane_b32 v40, s49, 9
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s43, s27, 24
; GFX11-NEXT: s_lshr_b32 s44, s27, 16
@@ -26099,7 +25420,7 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v4, s25, 1.0
; GFX11-NEXT: v_add_f32_e64 v3, s24, 1.0
@@ -26165,58 +25486,8 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; GFX11-NEXT: v_lshrrev_b32_e32 v83, 8, v18
; GFX11-NEXT: v_lshrrev_b32_e32 v96, 16, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v85, 8, v17
-; GFX11-NEXT: s_branch .LBB49_5
+; GFX11-NEXT: s_branch .LBB49_4
; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: ; implicit-def: $sgpr49
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr48
-; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr37
-; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr35
-; GFX11-NEXT: ; implicit-def: $sgpr31
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr95
-; GFX11-NEXT: ; implicit-def: $sgpr93
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr91
-; GFX11-NEXT: ; implicit-def: $sgpr89
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr79
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr77
-; GFX11-NEXT: ; implicit-def: $sgpr75
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: s_branch .LBB49_2
-; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_dual_mov_b32 v17, s0 :: v_dual_mov_b32 v18, s1
; GFX11-NEXT: v_dual_mov_b32 v13, s2 :: v_dual_mov_b32 v14, s3
; GFX11-NEXT: v_dual_mov_b32 v11, s16 :: v_dual_mov_b32 v12, s17
@@ -26249,7 +25520,7 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v20, s14 :: v_dual_mov_b32 v21, s12
; GFX11-NEXT: v_dual_mov_b32 v22, s10 :: v_dual_mov_b32 v23, s8
; GFX11-NEXT: v_dual_mov_b32 v24, s6 :: v_dual_mov_b32 v25, s4
-; GFX11-NEXT: .LBB49_5: ; %end
+; GFX11-NEXT: .LBB49_4: ; %end
; GFX11-NEXT: v_perm_b32 v86, v86, v87, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_perm_b32 v24, v84, v24, 0xc0c0004
@@ -28702,7 +27973,7 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v37
; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v27, 8, v27
-; SI-NEXT: s_cbranch_scc0 .LBB51_4
+; SI-NEXT: s_cbranch_scc0 .LBB51_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s6, 0xff
; SI-NEXT: v_or_b32_e32 v0, s4, v34
@@ -29066,9 +28337,6 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: .LBB51_3: ; %end
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB51_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB51_2
;
; VI-LABEL: bitcast_v64i8_to_v16f32_scalar:
; VI: ; %bb.0:
@@ -29127,7 +28395,7 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v35
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB51_4
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -29377,9 +28645,6 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; VI-NEXT: .LBB51_3: ; %end
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB51_2
;
; GFX9-LABEL: bitcast_v64i8_to_v16f32_scalar:
; GFX9: ; %bb.0:
@@ -29438,7 +28703,7 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX9-NEXT: s_waitcnt vmcnt(19)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v35
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -29677,9 +28942,6 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX9-NEXT: .LBB51_3: ; %end
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB51_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB51_2
;
; GFX11-TRUE16-LABEL: bitcast_v64i8_to_v16f32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -29736,7 +28998,7 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v54
; GFX11-TRUE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; GFX11-TRUE16-NEXT: v_perm_b32 v16, v37, v34, 0xc0c0004
@@ -29961,9 +29223,6 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: .LBB51_3: ; %end
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB51_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB51_2
;
; GFX11-FAKE16-LABEL: bitcast_v64i8_to_v16f32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -30020,7 +29279,7 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v54
; GFX11-FAKE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB51_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; GFX11-FAKE16-NEXT: v_perm_b32 v16, v37, v34, 0xc0c0004
@@ -30245,9 +29504,6 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: .LBB51_3: ; %end
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB51_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB51_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -30411,7 +29667,7 @@ define inreg <8 x double> @bitcast_v8i64_to_v8f64_scalar(<8 x i64> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s6, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s7, v0
-; SI-NEXT: s_cbranch_scc0 .LBB53_4
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB53_3
; SI-NEXT: .LBB53_2: ; %cmp.true
@@ -30449,8 +29705,6 @@ define inreg <8 x double> @bitcast_v8i64_to_v8f64_scalar(<8 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v14, s7
; SI-NEXT: v_mov_b32_e32 v15, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB53_4:
-; SI-NEXT: s_branch .LBB53_2
;
; VI-LABEL: bitcast_v8i64_to_v8f64_scalar:
; VI: ; %bb.0:
@@ -30459,7 +29713,7 @@ define inreg <8 x double> @bitcast_v8i64_to_v8f64_scalar(<8 x i64> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB53_4
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB53_3
; VI-NEXT: .LBB53_2: ; %cmp.true
@@ -30497,8 +29751,6 @@ define inreg <8 x double> @bitcast_v8i64_to_v8f64_scalar(<8 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v14, s7
; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_4:
-; VI-NEXT: s_branch .LBB53_2
;
; GFX9-LABEL: bitcast_v8i64_to_v8f64_scalar:
; GFX9: ; %bb.0:
@@ -30507,7 +29759,7 @@ define inreg <8 x double> @bitcast_v8i64_to_v8f64_scalar(<8 x i64> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB53_3
; GFX9-NEXT: .LBB53_2: ; %cmp.true
@@ -30545,15 +29797,13 @@ define inreg <8 x double> @bitcast_v8i64_to_v8f64_scalar(<8 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v14, s7
; GFX9-NEXT: v_mov_b32_e32 v15, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB53_4:
-; GFX9-NEXT: s_branch .LBB53_2
;
; GFX11-LABEL: bitcast_v8i64_to_v8f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
@@ -30584,8 +29834,6 @@ define inreg <8 x double> @bitcast_v8i64_to_v8f64_scalar(<8 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -30719,9 +29967,9 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s31, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s30, v0
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB55_4
+; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
@@ -30731,10 +29979,8 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; SI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; SI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; SI-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
-; SI-NEXT: s_branch .LBB55_5
+; SI-NEXT: s_branch .LBB55_4
; SI-NEXT: .LBB55_3:
-; SI-NEXT: s_branch .LBB55_2
-; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -30751,7 +29997,7 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: v_mov_b32_e32 v14, s30
; SI-NEXT: v_mov_b32_e32 v15, s31
-; SI-NEXT: .LBB55_5: ; %end
+; SI-NEXT: .LBB55_4: ; %end
; SI-NEXT: v_readlane_b32 s31, v16, 1
; SI-NEXT: v_readlane_b32 s30, v16, 0
; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -30772,9 +30018,9 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_4
+; VI-NEXT: s_cbranch_execnz .LBB55_3
; VI-NEXT: .LBB55_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
@@ -30784,10 +30030,8 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; VI-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
-; VI-NEXT: s_branch .LBB55_5
+; VI-NEXT: s_branch .LBB55_4
; VI-NEXT: .LBB55_3:
-; VI-NEXT: s_branch .LBB55_2
-; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -30804,7 +30048,7 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB55_5: ; %end
+; VI-NEXT: .LBB55_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v16, 1
; VI-NEXT: v_readlane_b32 s30, v16, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -30825,9 +30069,9 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
+; GFX9-NEXT: s_cbranch_execnz .LBB55_3
; GFX9-NEXT: .LBB55_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
@@ -30837,10 +30081,8 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; GFX9-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
-; GFX9-NEXT: s_branch .LBB55_5
+; GFX9-NEXT: s_branch .LBB55_4
; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
-; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -30857,7 +30099,7 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB55_5: ; %end
+; GFX9-NEXT: .LBB55_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -30875,10 +30117,10 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-NEXT: .LBB55_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
@@ -30890,8 +30132,6 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; GFX11-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
-; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -31163,7 +30403,7 @@ define inreg <32 x i16> @bitcast_v8i64_to_v32i16_scalar(<8 x i64> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s5, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v0
-; SI-NEXT: s_cbranch_scc0 .LBB57_4
+; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s56, s5, 16
; SI-NEXT: s_lshr_b32 s57, s29, 16
@@ -31281,24 +30521,6 @@ define inreg <32 x i16> @bitcast_v8i64_to_v32i16_scalar(<8 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v14, s4
; SI-NEXT: v_mov_b32_e32 v15, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB57_4:
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: s_branch .LBB57_2
;
; VI-LABEL: bitcast_v8i64_to_v32i16_scalar:
; VI: ; %bb.0:
@@ -31307,7 +30529,7 @@ define inreg <32 x i16> @bitcast_v8i64_to_v32i16_scalar(<8 x i64> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB57_3
; VI-NEXT: .LBB57_2: ; %cmp.true
@@ -31345,8 +30567,6 @@ define inreg <32 x i16> @bitcast_v8i64_to_v32i16_scalar(<8 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v14, s7
; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v8i64_to_v32i16_scalar:
; GFX9: ; %bb.0:
@@ -31355,7 +30575,7 @@ define inreg <32 x i16> @bitcast_v8i64_to_v32i16_scalar(<8 x i64> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB57_3
; GFX9-NEXT: .LBB57_2: ; %cmp.true
@@ -31393,15 +30613,13 @@ define inreg <32 x i16> @bitcast_v8i64_to_v32i16_scalar(<8 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v14, s7
; GFX9-NEXT: v_mov_b32_e32 v15, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB57_4:
-; GFX9-NEXT: s_branch .LBB57_2
;
; GFX11-LABEL: bitcast_v8i64_to_v32i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB57_3
@@ -31433,8 +30651,6 @@ define inreg <32 x i16> @bitcast_v8i64_to_v32i16_scalar(<8 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB57_4:
-; GFX11-NEXT: s_branch .LBB57_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -31824,7 +31040,7 @@ define inreg <8 x i64> @bitcast_v32i16_to_v8i64_scalar(<32 x i16> inreg %a, i32
; SI-NEXT: v_writelane_b32 v16, s50, 6
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v16, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB59_4
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s63, 16
@@ -31986,9 +31202,6 @@ define inreg <8 x i64> @bitcast_v32i16_to_v8i64_scalar(<32 x i16> inreg %a, i32
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB59_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB59_2
;
; VI-LABEL: bitcast_v32i16_to_v8i64_scalar:
; VI: ; %bb.0:
@@ -31997,7 +31210,7 @@ define inreg <8 x i64> @bitcast_v32i16_to_v8i64_scalar(<32 x i16> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB59_4
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB59_3
; VI-NEXT: .LBB59_2: ; %cmp.true
@@ -32099,8 +31312,6 @@ define inreg <8 x i64> @bitcast_v32i16_to_v8i64_scalar(<32 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v14, s7
; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB59_4:
-; VI-NEXT: s_branch .LBB59_2
;
; GFX9-LABEL: bitcast_v32i16_to_v8i64_scalar:
; GFX9: ; %bb.0:
@@ -32114,9 +31325,9 @@ define inreg <8 x i64> @bitcast_v32i16_to_v8i64_scalar(<32 x i16> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
+; GFX9-NEXT: s_cbranch_execnz .LBB59_3
; GFX9-NEXT: .LBB59_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, s31, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, s30, 3 op_sel_hi:[1,0]
@@ -32134,10 +31345,8 @@ define inreg <8 x i64> @bitcast_v32i16_to_v8i64_scalar(<32 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB59_5
+; GFX9-NEXT: s_branch .LBB59_4
; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
-; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -32154,7 +31363,7 @@ define inreg <8 x i64> @bitcast_v32i16_to_v8i64_scalar(<32 x i16> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB59_5: ; %end
+; GFX9-NEXT: .LBB59_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -32172,10 +31381,10 @@ define inreg <8 x i64> @bitcast_v32i16_to_v8i64_scalar(<32 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-NEXT: .LBB59_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
@@ -32195,8 +31404,6 @@ define inreg <8 x i64> @bitcast_v32i16_to_v8i64_scalar(<32 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
-; GFX11-NEXT: .LBB59_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -32468,7 +31675,7 @@ define inreg <32 x half> @bitcast_v8i64_to_v32f16_scalar(<8 x i64> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s5, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v0
-; SI-NEXT: s_cbranch_scc0 .LBB61_4
+; SI-NEXT: s_cbranch_scc0 .LBB61_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s56, s5, 16
; SI-NEXT: s_lshr_b32 s57, s29, 16
@@ -32586,24 +31793,6 @@ define inreg <32 x half> @bitcast_v8i64_to_v32f16_scalar(<8 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v14, s4
; SI-NEXT: v_mov_b32_e32 v15, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB61_4:
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: s_branch .LBB61_2
;
; VI-LABEL: bitcast_v8i64_to_v32f16_scalar:
; VI: ; %bb.0:
@@ -32612,7 +31801,7 @@ define inreg <32 x half> @bitcast_v8i64_to_v32f16_scalar(<8 x i64> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB61_4
+; VI-NEXT: s_cbranch_scc0 .LBB61_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB61_3
; VI-NEXT: .LBB61_2: ; %cmp.true
@@ -32650,8 +31839,6 @@ define inreg <32 x half> @bitcast_v8i64_to_v32f16_scalar(<8 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v14, s7
; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB61_4:
-; VI-NEXT: s_branch .LBB61_2
;
; GFX9-LABEL: bitcast_v8i64_to_v32f16_scalar:
; GFX9: ; %bb.0:
@@ -32660,7 +31847,7 @@ define inreg <32 x half> @bitcast_v8i64_to_v32f16_scalar(<8 x i64> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB61_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB61_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB61_3
; GFX9-NEXT: .LBB61_2: ; %cmp.true
@@ -32698,15 +31885,13 @@ define inreg <32 x half> @bitcast_v8i64_to_v32f16_scalar(<8 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v14, s7
; GFX9-NEXT: v_mov_b32_e32 v15, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB61_4:
-; GFX9-NEXT: s_branch .LBB61_2
;
; GFX11-LABEL: bitcast_v8i64_to_v32f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB61_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB61_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB61_3
@@ -32738,8 +31923,6 @@ define inreg <32 x half> @bitcast_v8i64_to_v32f16_scalar(<8 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB61_4:
-; GFX11-NEXT: s_branch .LBB61_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -33193,7 +32376,7 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; SI-NEXT: v_writelane_b32 v18, s50, 6
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v18, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB63_3
+; SI-NEXT: s_cbranch_scc0 .LBB63_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s63, 16
@@ -33243,7 +32426,7 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s51, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB63_4
+; SI-NEXT: s_cbranch_execnz .LBB63_3
; SI-NEXT: .LBB63_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s63
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -33373,11 +32556,8 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v16
; SI-NEXT: v_or_b32_e32 v15, v17, v15
-; SI-NEXT: s_branch .LBB63_5
+; SI-NEXT: s_branch .LBB63_4
; SI-NEXT: .LBB63_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB63_2
-; SI-NEXT: .LBB63_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -33394,7 +32574,7 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v13, s49
; SI-NEXT: v_mov_b32_e32 v14, s50
; SI-NEXT: v_mov_b32_e32 v15, s51
-; SI-NEXT: .LBB63_5: ; %end
+; SI-NEXT: .LBB63_4: ; %end
; SI-NEXT: v_readlane_b32 s51, v18, 7
; SI-NEXT: v_readlane_b32 s50, v18, 6
; SI-NEXT: v_readlane_b32 s49, v18, 5
@@ -33421,9 +32601,9 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB63_3
+; VI-NEXT: s_cbranch_scc0 .LBB63_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB63_4
+; VI-NEXT: s_cbranch_execnz .LBB63_3
; VI-NEXT: .LBB63_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s31, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -33506,10 +32686,8 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; VI-NEXT: v_add_f16_sdwa v16, v16, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v16
-; VI-NEXT: s_branch .LBB63_5
+; VI-NEXT: s_branch .LBB63_4
; VI-NEXT: .LBB63_3:
-; VI-NEXT: s_branch .LBB63_2
-; VI-NEXT: .LBB63_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -33526,7 +32704,7 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB63_5: ; %end
+; VI-NEXT: .LBB63_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v17, 1
; VI-NEXT: v_readlane_b32 s30, v17, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -33547,9 +32725,9 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB63_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB63_4
+; GFX9-NEXT: s_cbranch_execnz .LBB63_3
; GFX9-NEXT: .LBB63_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, s31, v0 op_sel_hi:[1,0]
@@ -33568,10 +32746,8 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB63_5
+; GFX9-NEXT: s_branch .LBB63_4
; GFX9-NEXT: .LBB63_3:
-; GFX9-NEXT: s_branch .LBB63_2
-; GFX9-NEXT: .LBB63_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -33588,7 +32764,7 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB63_5: ; %end
+; GFX9-NEXT: .LBB63_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -33606,10 +32782,10 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB63_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB63_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB63_3
; GFX11-NEXT: .LBB63_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
@@ -33629,8 +32805,6 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB63_3:
-; GFX11-NEXT: s_branch .LBB63_2
-; GFX11-NEXT: .LBB63_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -33982,7 +33156,7 @@ define inreg <32 x bfloat> @bitcast_v8i64_to_v32bf16_scalar(<8 x i64> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s78, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s79, v0
-; SI-NEXT: s_cbranch_scc0 .LBB65_4
+; SI-NEXT: s_cbranch_scc0 .LBB65_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s7, s78, 0xffff0000
; SI-NEXT: s_lshl_b32 s6, s78, 16
@@ -34132,40 +33306,6 @@ define inreg <32 x bfloat> @bitcast_v8i64_to_v32bf16_scalar(<8 x i64> inreg %a,
; SI-NEXT: v_mul_f32_e64 v15, 1.0, s6
; SI-NEXT: v_lshr_b64 v[15:16], v[15:16], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB65_4:
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB65_2
;
; VI-LABEL: bitcast_v8i64_to_v32bf16_scalar:
; VI: ; %bb.0:
@@ -34174,7 +33314,7 @@ define inreg <32 x bfloat> @bitcast_v8i64_to_v32bf16_scalar(<8 x i64> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB65_4
+; VI-NEXT: s_cbranch_scc0 .LBB65_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB65_3
; VI-NEXT: .LBB65_2: ; %cmp.true
@@ -34212,8 +33352,6 @@ define inreg <32 x bfloat> @bitcast_v8i64_to_v32bf16_scalar(<8 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v14, s7
; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB65_4:
-; VI-NEXT: s_branch .LBB65_2
;
; GFX9-LABEL: bitcast_v8i64_to_v32bf16_scalar:
; GFX9: ; %bb.0:
@@ -34222,7 +33360,7 @@ define inreg <32 x bfloat> @bitcast_v8i64_to_v32bf16_scalar(<8 x i64> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB65_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB65_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB65_3
; GFX9-NEXT: .LBB65_2: ; %cmp.true
@@ -34260,15 +33398,13 @@ define inreg <32 x bfloat> @bitcast_v8i64_to_v32bf16_scalar(<8 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v14, s7
; GFX9-NEXT: v_mov_b32_e32 v15, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB65_4:
-; GFX9-NEXT: s_branch .LBB65_2
;
; GFX11-LABEL: bitcast_v8i64_to_v32bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB65_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB65_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB65_3
@@ -34300,8 +33436,6 @@ define inreg <32 x bfloat> @bitcast_v8i64_to_v32bf16_scalar(<8 x i64> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB65_4:
-; GFX11-NEXT: s_branch .LBB65_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -35814,7 +34948,7 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_mul_f32_e64 v21, 1.0, s5
; SI-NEXT: v_mul_f32_e64 v19, 1.0, s44
; SI-NEXT: v_mul_f32_e64 v17, 1.0, s42
-; SI-NEXT: s_cbranch_scc0 .LBB67_4
+; SI-NEXT: s_cbranch_scc0 .LBB67_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v41
; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v63
@@ -35967,9 +35101,6 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB67_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB67_2
;
; VI-LABEL: bitcast_v32bf16_to_v8i64_scalar:
; VI: ; %bb.0:
@@ -35983,9 +35114,9 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB67_3
+; VI-NEXT: s_cbranch_scc0 .LBB67_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB67_4
+; VI-NEXT: s_cbranch_execnz .LBB67_3
; VI-NEXT: .LBB67_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v16, 0x40c00000
; VI-NEXT: s_lshl_b32 s4, s30, 16
@@ -36284,10 +35415,8 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; VI-NEXT: v_lshrrev_b64 v[16:17], 16, v[17:18]
; VI-NEXT: v_mov_b32_e32 v13, v15
; VI-NEXT: v_mov_b32_e32 v15, v16
-; VI-NEXT: s_branch .LBB67_5
+; VI-NEXT: s_branch .LBB67_4
; VI-NEXT: .LBB67_3:
-; VI-NEXT: s_branch .LBB67_2
-; VI-NEXT: .LBB67_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -36304,7 +35433,7 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB67_5: ; %end
+; VI-NEXT: .LBB67_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v20, 1
; VI-NEXT: v_readlane_b32 s30, v20, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -36325,9 +35454,9 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB67_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB67_4
+; GFX9-NEXT: s_cbranch_execnz .LBB67_3
; GFX9-NEXT: .LBB67_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s31, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -36635,10 +35764,8 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; GFX9-NEXT: v_cndmask_b32_e32 v17, v18, v19, vcc
; GFX9-NEXT: v_and_b32_sdwa v16, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v16
-; GFX9-NEXT: s_branch .LBB67_5
+; GFX9-NEXT: s_branch .LBB67_4
; GFX9-NEXT: .LBB67_3:
-; GFX9-NEXT: s_branch .LBB67_2
-; GFX9-NEXT: .LBB67_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -36655,7 +35782,7 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB67_5: ; %end
+; GFX9-NEXT: .LBB67_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v20, 1
; GFX9-NEXT: v_readlane_b32 s30, v20, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -36673,10 +35800,10 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB67_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB67_3
; GFX11-TRUE16-NEXT: .LBB67_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s0, s27, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s27, 16
@@ -36988,8 +36115,6 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v17.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB67_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB67_2
-; GFX11-TRUE16-NEXT: .LBB67_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -37009,10 +36134,10 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB67_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB67_3
; GFX11-FAKE16-NEXT: .LBB67_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_and_b32 s1, s27, 0xffff0000
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s27, 16
@@ -37343,8 +36468,6 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v20, 16, v17
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB67_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB67_2
-; GFX11-FAKE16-NEXT: .LBB67_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -38890,7 +38013,7 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v1
; SI-NEXT: v_writelane_b32 v4, s85, 29
-; SI-NEXT: s_cbranch_scc0 .LBB69_4
+; SI-NEXT: s_cbranch_scc0 .LBB69_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s38, s5, 24
; SI-NEXT: s_lshr_b32 s39, s5, 16
@@ -39246,56 +38369,6 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB69_4:
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr85
-; SI-NEXT: ; implicit-def: $sgpr84
-; SI-NEXT: ; implicit-def: $sgpr83
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr82
-; SI-NEXT: ; implicit-def: $sgpr81
-; SI-NEXT: ; implicit-def: $sgpr80
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr71
-; SI-NEXT: ; implicit-def: $sgpr70
-; SI-NEXT: ; implicit-def: $sgpr69
-; SI-NEXT: ; implicit-def: $sgpr68
-; SI-NEXT: ; implicit-def: $sgpr67
-; SI-NEXT: ; implicit-def: $sgpr66
-; SI-NEXT: ; implicit-def: $sgpr65
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB69_2
;
; VI-LABEL: bitcast_v8i64_to_v64i8_scalar:
; VI: ; %bb.0:
@@ -39327,7 +38400,7 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s4, v1
; VI-NEXT: v_writelane_b32 v4, s67, 19
-; VI-NEXT: s_cbranch_scc0 .LBB69_4
+; VI-NEXT: s_cbranch_scc0 .LBB69_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s56, s5, 24
; VI-NEXT: s_lshr_b32 s57, s5, 16
@@ -39597,56 +38670,6 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB69_4:
-; VI-NEXT: ; implicit-def: $sgpr67
-; VI-NEXT: ; implicit-def: $sgpr66
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr65
-; VI-NEXT: ; implicit-def: $sgpr64
-; VI-NEXT: ; implicit-def: $sgpr55
-; VI-NEXT: ; implicit-def: $sgpr54
-; VI-NEXT: ; implicit-def: $sgpr53
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr52
-; VI-NEXT: ; implicit-def: $sgpr51
-; VI-NEXT: ; implicit-def: $sgpr50
-; VI-NEXT: ; implicit-def: $sgpr49
-; VI-NEXT: ; implicit-def: $sgpr48
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr39
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr37
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: s_branch .LBB69_2
;
; GFX9-LABEL: bitcast_v8i64_to_v64i8_scalar:
; GFX9: ; %bb.0:
@@ -39674,7 +38697,7 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
; GFX9-NEXT: v_writelane_b32 v4, s55, 15
-; GFX9-NEXT: s_cbranch_scc0 .LBB69_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB69_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s56, s5, 24
; GFX9-NEXT: s_lshr_b32 s57, s5, 16
@@ -39925,56 +38948,6 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB69_4:
-; GFX9-NEXT: ; implicit-def: $sgpr55
-; GFX9-NEXT: ; implicit-def: $sgpr54
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr53
-; GFX9-NEXT: ; implicit-def: $sgpr52
-; GFX9-NEXT: ; implicit-def: $sgpr51
-; GFX9-NEXT: ; implicit-def: $sgpr50
-; GFX9-NEXT: ; implicit-def: $sgpr49
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr48
-; GFX9-NEXT: ; implicit-def: $sgpr39
-; GFX9-NEXT: ; implicit-def: $sgpr38
-; GFX9-NEXT: ; implicit-def: $sgpr37
-; GFX9-NEXT: ; implicit-def: $sgpr36
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr35
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr31
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: s_branch .LBB69_2
;
; GFX11-LABEL: bitcast_v8i64_to_v64i8_scalar:
; GFX11: ; %bb.0:
@@ -39993,7 +38966,7 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; GFX11-NEXT: v_writelane_b32 v23, s38, 6
; GFX11-NEXT: v_writelane_b32 v23, s39, 7
; GFX11-NEXT: v_writelane_b32 v23, s48, 8
-; GFX11-NEXT: s_cbranch_scc0 .LBB69_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB69_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s42, s27, 24
; GFX11-NEXT: s_lshr_b32 s44, s27, 16
@@ -40196,56 +39169,6 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; GFX11-NEXT: s_mov_b32 exec_lo, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB69_4:
-; GFX11-NEXT: ; implicit-def: $sgpr48
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr37
-; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr35
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr31
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr95
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr91
-; GFX11-NEXT: ; implicit-def: $sgpr93
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr89
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr79
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr77
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr75
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: s_branch .LBB69_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -42613,7 +41536,7 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v37
; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v27, 8, v27
-; SI-NEXT: s_cbranch_scc0 .LBB71_4
+; SI-NEXT: s_cbranch_scc0 .LBB71_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s6, 0xff
; SI-NEXT: v_or_b32_e32 v0, s4, v34
@@ -42977,9 +41900,6 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: .LBB71_3: ; %end
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB71_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB71_2
;
; VI-LABEL: bitcast_v64i8_to_v8i64_scalar:
; VI: ; %bb.0:
@@ -43038,7 +41958,7 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v35
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB71_4
+; VI-NEXT: s_cbranch_scc0 .LBB71_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -43288,9 +42208,6 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; VI-NEXT: .LBB71_3: ; %end
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB71_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB71_2
;
; GFX9-LABEL: bitcast_v64i8_to_v8i64_scalar:
; GFX9: ; %bb.0:
@@ -43349,7 +42266,7 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX9-NEXT: s_waitcnt vmcnt(19)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v35
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB71_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB71_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -43588,9 +42505,6 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX9-NEXT: .LBB71_3: ; %end
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB71_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB71_2
;
; GFX11-TRUE16-LABEL: bitcast_v64i8_to_v8i64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -43647,7 +42561,7 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v54
; GFX11-TRUE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB71_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB71_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; GFX11-TRUE16-NEXT: v_perm_b32 v16, v37, v34, 0xc0c0004
@@ -43872,9 +42786,6 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX11-TRUE16-NEXT: .LBB71_3: ; %end
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB71_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB71_2
;
; GFX11-FAKE16-LABEL: bitcast_v64i8_to_v8i64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -43931,7 +42842,7 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v54
; GFX11-FAKE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB71_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB71_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; GFX11-FAKE16-NEXT: v_perm_b32 v16, v37, v34, 0xc0c0004
@@ -44156,9 +43067,6 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX11-FAKE16-NEXT: .LBB71_3: ; %end
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB71_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB71_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -44385,7 +43293,7 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s5, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v0
-; SI-NEXT: s_cbranch_scc0 .LBB73_3
+; SI-NEXT: s_cbranch_scc0 .LBB73_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s63, s5, 16
; SI-NEXT: s_lshr_b32 s62, s29, 16
@@ -44403,7 +43311,7 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB73_4
+; SI-NEXT: s_cbranch_execnz .LBB73_3
; SI-NEXT: .LBB73_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[14:15], s[4:5], 1.0
; SI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -44429,26 +43337,8 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v1
-; SI-NEXT: s_branch .LBB73_5
+; SI-NEXT: s_branch .LBB73_4
; SI-NEXT: .LBB73_3:
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: s_branch .LBB73_2
-; SI-NEXT: .LBB73_4:
; SI-NEXT: v_mov_b32_e32 v15, s5
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: v_mov_b32_e32 v11, s27
@@ -44481,7 +43371,7 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v18, s10
; SI-NEXT: v_mov_b32_e32 v17, s8
; SI-NEXT: v_mov_b32_e32 v16, s6
-; SI-NEXT: .LBB73_5: ; %end
+; SI-NEXT: .LBB73_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
@@ -44544,9 +43434,9 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB73_3
+; VI-NEXT: s_cbranch_scc0 .LBB73_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB73_4
+; VI-NEXT: s_cbranch_execnz .LBB73_3
; VI-NEXT: .LBB73_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -44556,10 +43446,8 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
-; VI-NEXT: s_branch .LBB73_5
+; VI-NEXT: s_branch .LBB73_4
; VI-NEXT: .LBB73_3:
-; VI-NEXT: s_branch .LBB73_2
-; VI-NEXT: .LBB73_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -44576,7 +43464,7 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB73_5: ; %end
+; VI-NEXT: .LBB73_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v16, 1
; VI-NEXT: v_readlane_b32 s30, v16, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -44597,9 +43485,9 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB73_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB73_4
+; GFX9-NEXT: s_cbranch_execnz .LBB73_3
; GFX9-NEXT: .LBB73_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -44609,10 +43497,8 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
-; GFX9-NEXT: s_branch .LBB73_5
+; GFX9-NEXT: s_branch .LBB73_4
; GFX9-NEXT: .LBB73_3:
-; GFX9-NEXT: s_branch .LBB73_2
-; GFX9-NEXT: .LBB73_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -44629,7 +43515,7 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB73_5: ; %end
+; GFX9-NEXT: .LBB73_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -44647,10 +43533,10 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB73_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB73_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB73_3
; GFX11-NEXT: .LBB73_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
@@ -44662,8 +43548,6 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB73_3:
-; GFX11-NEXT: s_branch .LBB73_2
-; GFX11-NEXT: .LBB73_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -45062,7 +43946,7 @@ define inreg <8 x double> @bitcast_v32i16_to_v8f64_scalar(<32 x i16> inreg %a, i
; SI-NEXT: v_writelane_b32 v16, s50, 6
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v16, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB75_4
+; SI-NEXT: s_cbranch_scc0 .LBB75_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s63, 16
@@ -45224,9 +44108,6 @@ define inreg <8 x double> @bitcast_v32i16_to_v8f64_scalar(<32 x i16> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB75_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB75_2
;
; VI-LABEL: bitcast_v32i16_to_v8f64_scalar:
; VI: ; %bb.0:
@@ -45235,7 +44116,7 @@ define inreg <8 x double> @bitcast_v32i16_to_v8f64_scalar(<32 x i16> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB75_4
+; VI-NEXT: s_cbranch_scc0 .LBB75_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB75_3
; VI-NEXT: .LBB75_2: ; %cmp.true
@@ -45337,8 +44218,6 @@ define inreg <8 x double> @bitcast_v32i16_to_v8f64_scalar(<32 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v14, s7
; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB75_4:
-; VI-NEXT: s_branch .LBB75_2
;
; GFX9-LABEL: bitcast_v32i16_to_v8f64_scalar:
; GFX9: ; %bb.0:
@@ -45352,9 +44231,9 @@ define inreg <8 x double> @bitcast_v32i16_to_v8f64_scalar(<32 x i16> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB75_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB75_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB75_4
+; GFX9-NEXT: s_cbranch_execnz .LBB75_3
; GFX9-NEXT: .LBB75_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, s31, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, s30, 3 op_sel_hi:[1,0]
@@ -45372,10 +44251,8 @@ define inreg <8 x double> @bitcast_v32i16_to_v8f64_scalar(<32 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB75_5
+; GFX9-NEXT: s_branch .LBB75_4
; GFX9-NEXT: .LBB75_3:
-; GFX9-NEXT: s_branch .LBB75_2
-; GFX9-NEXT: .LBB75_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -45392,7 +44269,7 @@ define inreg <8 x double> @bitcast_v32i16_to_v8f64_scalar(<32 x i16> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB75_5: ; %end
+; GFX9-NEXT: .LBB75_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -45410,10 +44287,10 @@ define inreg <8 x double> @bitcast_v32i16_to_v8f64_scalar(<32 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB75_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB75_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB75_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB75_3
; GFX11-NEXT: .LBB75_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
@@ -45433,8 +44310,6 @@ define inreg <8 x double> @bitcast_v32i16_to_v8f64_scalar(<32 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB75_3:
-; GFX11-NEXT: s_branch .LBB75_2
-; GFX11-NEXT: .LBB75_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -45670,7 +44545,7 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s5, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v0
-; SI-NEXT: s_cbranch_scc0 .LBB77_3
+; SI-NEXT: s_cbranch_scc0 .LBB77_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s63, s5, 16
; SI-NEXT: s_lshr_b32 s62, s29, 16
@@ -45688,7 +44563,7 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB77_4
+; SI-NEXT: s_cbranch_execnz .LBB77_3
; SI-NEXT: .LBB77_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[14:15], s[4:5], 1.0
; SI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -45714,26 +44589,8 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v1
-; SI-NEXT: s_branch .LBB77_5
+; SI-NEXT: s_branch .LBB77_4
; SI-NEXT: .LBB77_3:
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: s_branch .LBB77_2
-; SI-NEXT: .LBB77_4:
; SI-NEXT: v_mov_b32_e32 v15, s5
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: v_mov_b32_e32 v11, s27
@@ -45766,7 +44623,7 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v18, s10
; SI-NEXT: v_mov_b32_e32 v17, s8
; SI-NEXT: v_mov_b32_e32 v16, s6
-; SI-NEXT: .LBB77_5: ; %end
+; SI-NEXT: .LBB77_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
@@ -45829,9 +44686,9 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB77_3
+; VI-NEXT: s_cbranch_scc0 .LBB77_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB77_4
+; VI-NEXT: s_cbranch_execnz .LBB77_3
; VI-NEXT: .LBB77_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -45841,10 +44698,8 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
-; VI-NEXT: s_branch .LBB77_5
+; VI-NEXT: s_branch .LBB77_4
; VI-NEXT: .LBB77_3:
-; VI-NEXT: s_branch .LBB77_2
-; VI-NEXT: .LBB77_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -45861,7 +44716,7 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB77_5: ; %end
+; VI-NEXT: .LBB77_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v16, 1
; VI-NEXT: v_readlane_b32 s30, v16, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -45882,9 +44737,9 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB77_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB77_4
+; GFX9-NEXT: s_cbranch_execnz .LBB77_3
; GFX9-NEXT: .LBB77_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -45894,10 +44749,8 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
-; GFX9-NEXT: s_branch .LBB77_5
+; GFX9-NEXT: s_branch .LBB77_4
; GFX9-NEXT: .LBB77_3:
-; GFX9-NEXT: s_branch .LBB77_2
-; GFX9-NEXT: .LBB77_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -45914,7 +44767,7 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB77_5: ; %end
+; GFX9-NEXT: .LBB77_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -45932,10 +44785,10 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB77_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB77_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB77_3
; GFX11-NEXT: .LBB77_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
@@ -45947,8 +44800,6 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB77_3:
-; GFX11-NEXT: s_branch .LBB77_2
-; GFX11-NEXT: .LBB77_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -46411,7 +45262,7 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; SI-NEXT: v_writelane_b32 v18, s50, 6
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v18, s51, 7
-; SI-NEXT: s_cbranch_scc0 .LBB79_3
+; SI-NEXT: s_cbranch_scc0 .LBB79_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s63, 16
@@ -46461,7 +45312,7 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s51, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB79_4
+; SI-NEXT: s_cbranch_execnz .LBB79_3
; SI-NEXT: .LBB79_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s63
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -46591,11 +45442,8 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v16
; SI-NEXT: v_or_b32_e32 v15, v17, v15
-; SI-NEXT: s_branch .LBB79_5
+; SI-NEXT: s_branch .LBB79_4
; SI-NEXT: .LBB79_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51
-; SI-NEXT: s_branch .LBB79_2
-; SI-NEXT: .LBB79_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -46612,7 +45460,7 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v13, s49
; SI-NEXT: v_mov_b32_e32 v14, s50
; SI-NEXT: v_mov_b32_e32 v15, s51
-; SI-NEXT: .LBB79_5: ; %end
+; SI-NEXT: .LBB79_4: ; %end
; SI-NEXT: v_readlane_b32 s51, v18, 7
; SI-NEXT: v_readlane_b32 s50, v18, 6
; SI-NEXT: v_readlane_b32 s49, v18, 5
@@ -46639,9 +45487,9 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB79_3
+; VI-NEXT: s_cbranch_scc0 .LBB79_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB79_4
+; VI-NEXT: s_cbranch_execnz .LBB79_3
; VI-NEXT: .LBB79_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s31, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -46724,10 +45572,8 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; VI-NEXT: v_add_f16_sdwa v16, v16, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v16
-; VI-NEXT: s_branch .LBB79_5
+; VI-NEXT: s_branch .LBB79_4
; VI-NEXT: .LBB79_3:
-; VI-NEXT: s_branch .LBB79_2
-; VI-NEXT: .LBB79_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -46744,7 +45590,7 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB79_5: ; %end
+; VI-NEXT: .LBB79_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v17, 1
; VI-NEXT: v_readlane_b32 s30, v17, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -46765,9 +45611,9 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB79_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB79_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB79_4
+; GFX9-NEXT: s_cbranch_execnz .LBB79_3
; GFX9-NEXT: .LBB79_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, s31, v0 op_sel_hi:[1,0]
@@ -46786,10 +45632,8 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB79_5
+; GFX9-NEXT: s_branch .LBB79_4
; GFX9-NEXT: .LBB79_3:
-; GFX9-NEXT: s_branch .LBB79_2
-; GFX9-NEXT: .LBB79_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -46806,7 +45650,7 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB79_5: ; %end
+; GFX9-NEXT: .LBB79_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -46824,10 +45668,10 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB79_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB79_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB79_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB79_3
; GFX11-NEXT: .LBB79_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
@@ -46847,8 +45691,6 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB79_3:
-; GFX11-NEXT: s_branch .LBB79_2
-; GFX11-NEXT: .LBB79_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -47156,7 +45998,7 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; SI-NEXT: v_readfirstlane_b32 s5, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v0
-; SI-NEXT: s_cbranch_scc0 .LBB81_3
+; SI-NEXT: s_cbranch_scc0 .LBB81_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s79, s5, 0xffff0000
; SI-NEXT: s_lshl_b32 s78, s5, 16
@@ -47190,7 +46032,7 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; SI-NEXT: s_lshl_b32 s10, s17, 16
; SI-NEXT: s_and_b32 s9, s16, 0xffff0000
; SI-NEXT: s_lshl_b32 s8, s16, 16
-; SI-NEXT: s_cbranch_execnz .LBB81_4
+; SI-NEXT: s_cbranch_execnz .LBB81_3
; SI-NEXT: .LBB81_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[19:20], s[22:23], 1.0
; SI-NEXT: v_add_f64 v[14:15], s[24:25], 1.0
@@ -47232,42 +46074,8 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v31
; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v30
; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30
-; SI-NEXT: s_branch .LBB81_5
+; SI-NEXT: s_branch .LBB81_4
; SI-NEXT: .LBB81_3:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: s_branch .LBB81_2
-; SI-NEXT: .LBB81_4:
; SI-NEXT: v_mov_b32_e32 v1, s79
; SI-NEXT: v_mov_b32_e32 v0, s78
; SI-NEXT: v_mov_b32_e32 v3, s77
@@ -47300,7 +46108,7 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; SI-NEXT: v_mov_b32_e32 v28, s10
; SI-NEXT: v_mov_b32_e32 v31, s9
; SI-NEXT: v_mov_b32_e32 v30, s8
-; SI-NEXT: .LBB81_5: ; %end
+; SI-NEXT: .LBB81_4: ; %end
; SI-NEXT: v_mul_f32_e32 v31, 1.0, v31
; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v31
; SI-NEXT: v_mul_f32_e32 v30, 1.0, v30
@@ -47379,9 +46187,9 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB81_3
+; VI-NEXT: s_cbranch_scc0 .LBB81_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB81_4
+; VI-NEXT: s_cbranch_execnz .LBB81_3
; VI-NEXT: .LBB81_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -47391,10 +46199,8 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
-; VI-NEXT: s_branch .LBB81_5
+; VI-NEXT: s_branch .LBB81_4
; VI-NEXT: .LBB81_3:
-; VI-NEXT: s_branch .LBB81_2
-; VI-NEXT: .LBB81_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -47411,7 +46217,7 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB81_5: ; %end
+; VI-NEXT: .LBB81_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v16, 1
; VI-NEXT: v_readlane_b32 s30, v16, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -47432,9 +46238,9 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB81_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB81_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB81_4
+; GFX9-NEXT: s_cbranch_execnz .LBB81_3
; GFX9-NEXT: .LBB81_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[14:15], s[30:31], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
@@ -47444,10 +46250,8 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
-; GFX9-NEXT: s_branch .LBB81_5
+; GFX9-NEXT: s_branch .LBB81_4
; GFX9-NEXT: .LBB81_3:
-; GFX9-NEXT: s_branch .LBB81_2
-; GFX9-NEXT: .LBB81_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -47464,7 +46268,7 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB81_5: ; %end
+; GFX9-NEXT: .LBB81_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -47482,10 +46286,10 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB81_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB81_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB81_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB81_3
; GFX11-NEXT: .LBB81_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
@@ -47497,8 +46301,6 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB81_3:
-; GFX11-NEXT: s_branch .LBB81_2
-; GFX11-NEXT: .LBB81_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -49020,7 +47822,7 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; SI-NEXT: v_mul_f32_e64 v21, 1.0, s5
; SI-NEXT: v_mul_f32_e64 v19, 1.0, s44
; SI-NEXT: v_mul_f32_e64 v17, 1.0, s42
-; SI-NEXT: s_cbranch_scc0 .LBB83_4
+; SI-NEXT: s_cbranch_scc0 .LBB83_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v41
; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v63
@@ -49173,9 +47975,6 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB83_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB83_2
;
; VI-LABEL: bitcast_v32bf16_to_v8f64_scalar:
; VI: ; %bb.0:
@@ -49189,9 +47988,9 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB83_3
+; VI-NEXT: s_cbranch_scc0 .LBB83_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB83_4
+; VI-NEXT: s_cbranch_execnz .LBB83_3
; VI-NEXT: .LBB83_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v16, 0x40c00000
; VI-NEXT: s_lshl_b32 s4, s30, 16
@@ -49490,10 +48289,8 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; VI-NEXT: v_lshrrev_b64 v[16:17], 16, v[17:18]
; VI-NEXT: v_mov_b32_e32 v13, v15
; VI-NEXT: v_mov_b32_e32 v15, v16
-; VI-NEXT: s_branch .LBB83_5
+; VI-NEXT: s_branch .LBB83_4
; VI-NEXT: .LBB83_3:
-; VI-NEXT: s_branch .LBB83_2
-; VI-NEXT: .LBB83_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -49510,7 +48307,7 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB83_5: ; %end
+; VI-NEXT: .LBB83_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v20, 1
; VI-NEXT: v_readlane_b32 s30, v20, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -49531,9 +48328,9 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB83_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB83_4
+; GFX9-NEXT: s_cbranch_execnz .LBB83_3
; GFX9-NEXT: .LBB83_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s31, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -49841,10 +48638,8 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; GFX9-NEXT: v_cndmask_b32_e32 v17, v18, v19, vcc
; GFX9-NEXT: v_and_b32_sdwa v16, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v16
-; GFX9-NEXT: s_branch .LBB83_5
+; GFX9-NEXT: s_branch .LBB83_4
; GFX9-NEXT: .LBB83_3:
-; GFX9-NEXT: s_branch .LBB83_2
-; GFX9-NEXT: .LBB83_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -49861,7 +48656,7 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB83_5: ; %end
+; GFX9-NEXT: .LBB83_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v20, 1
; GFX9-NEXT: v_readlane_b32 s30, v20, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -49879,10 +48674,10 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB83_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB83_3
; GFX11-TRUE16-NEXT: .LBB83_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s0, s27, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s27, 16
@@ -50194,8 +48989,6 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v17.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB83_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB83_2
-; GFX11-TRUE16-NEXT: .LBB83_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -50215,10 +49008,10 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB83_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB83_3
; GFX11-FAKE16-NEXT: .LBB83_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_and_b32 s1, s27, 0xffff0000
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s27, 16
@@ -50549,8 +49342,6 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v20, 16, v17
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB83_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB83_2
-; GFX11-FAKE16-NEXT: .LBB83_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -52048,7 +50839,7 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v1
; SI-NEXT: v_writelane_b32 v40, s87, 31
-; SI-NEXT: s_cbranch_scc0 .LBB85_3
+; SI-NEXT: s_cbranch_scc0 .LBB85_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s48, s5, 24
; SI-NEXT: s_lshr_b32 s49, s5, 16
@@ -52098,7 +50889,7 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 24
; SI-NEXT: s_lshr_b64 s[62:63], s[16:17], 16
; SI-NEXT: s_lshr_b64 s[74:75], s[16:17], 8
-; SI-NEXT: s_cbranch_execnz .LBB85_4
+; SI-NEXT: s_cbranch_execnz .LBB85_3
; SI-NEXT: .LBB85_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[28:29], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[5:6], s[26:27], 1.0
@@ -52164,58 +50955,8 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; SI-NEXT: s_lshr_b32 s85, s17, 24
; SI-NEXT: s_lshr_b32 s86, s17, 16
; SI-NEXT: s_lshr_b32 s87, s17, 8
-; SI-NEXT: s_branch .LBB85_5
+; SI-NEXT: s_branch .LBB85_4
; SI-NEXT: .LBB85_3:
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr87
-; SI-NEXT: ; implicit-def: $sgpr86
-; SI-NEXT: ; implicit-def: $sgpr85
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr84
-; SI-NEXT: ; implicit-def: $sgpr83
-; SI-NEXT: ; implicit-def: $sgpr82
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr81
-; SI-NEXT: ; implicit-def: $sgpr80
-; SI-NEXT: ; implicit-def: $sgpr71
-; SI-NEXT: ; implicit-def: $sgpr70
-; SI-NEXT: ; implicit-def: $sgpr69
-; SI-NEXT: ; implicit-def: $sgpr68
-; SI-NEXT: ; implicit-def: $sgpr67
-; SI-NEXT: ; implicit-def: $sgpr66
-; SI-NEXT: ; implicit-def: $sgpr65
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB85_2
-; SI-NEXT: .LBB85_4:
; SI-NEXT: v_mov_b32_e32 v32, s16
; SI-NEXT: v_mov_b32_e32 v28, s18
; SI-NEXT: v_mov_b32_e32 v20, s20
@@ -52248,7 +50989,7 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v11, s14
; SI-NEXT: v_mov_b32_e32 v10, s10
; SI-NEXT: v_mov_b32_e32 v9, s6
-; SI-NEXT: .LBB85_5: ; %end
+; SI-NEXT: .LBB85_4: ; %end
; SI-NEXT: v_and_b32_e32 v2, 0xff, v32
; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v53
; SI-NEXT: s_and_b32 s4, s17, 0xff
@@ -52530,7 +51271,7 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill
-; VI-NEXT: s_cbranch_scc0 .LBB85_3
+; VI-NEXT: s_cbranch_scc0 .LBB85_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s35, s5, 24
; VI-NEXT: s_lshr_b32 s34, s5, 16
@@ -52580,7 +51321,7 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; VI-NEXT: s_lshr_b64 s[10:11], s[20:21], 24
; VI-NEXT: s_lshr_b64 s[8:9], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[6:7], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB85_4
+; VI-NEXT: s_cbranch_execnz .LBB85_3
; VI-NEXT: .LBB85_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[9:10], s[4:5], 1.0
; VI-NEXT: v_add_f64 v[1:2], s[28:29], 1.0
@@ -52644,58 +51385,8 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; VI-NEXT: v_lshrrev_b32_e32 v18, 8, v16
; VI-NEXT: v_lshrrev_b32_e32 v57, 16, v15
; VI-NEXT: v_lshrrev_b32_e32 v58, 8, v15
-; VI-NEXT: s_branch .LBB85_5
+; VI-NEXT: s_branch .LBB85_4
; VI-NEXT: .LBB85_3:
-; VI-NEXT: ; implicit-def: $sgpr66
-; VI-NEXT: ; implicit-def: $sgpr67
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr64
-; VI-NEXT: ; implicit-def: $sgpr65
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr54
-; VI-NEXT: ; implicit-def: $sgpr55
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr52
-; VI-NEXT: ; implicit-def: $sgpr53
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr50
-; VI-NEXT: ; implicit-def: $sgpr51
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr48
-; VI-NEXT: ; implicit-def: $sgpr49
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr39
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr37
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: s_branch .LBB85_2
-; VI-NEXT: .LBB85_4:
; VI-NEXT: v_mov_b32_e32 v26, s35
; VI-NEXT: v_mov_b32_e32 v25, s40
; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
@@ -52767,7 +51458,7 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v25, s35
-; VI-NEXT: .LBB85_5: ; %end
+; VI-NEXT: .LBB85_4: ; %end
; VI-NEXT: s_mov_b32 s4, 0xc0c0004
; VI-NEXT: v_perm_b32 v24, v57, v24, s4
; VI-NEXT: v_perm_b32 v15, v15, v58, s4
@@ -52955,7 +51646,7 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB85_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB85_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s95, s5, 24
; GFX9-NEXT: s_lshr_b32 s94, s5, 16
@@ -53005,7 +51696,7 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; GFX9-NEXT: s_lshr_b64 s[10:11], s[20:21], 24
; GFX9-NEXT: s_lshr_b64 s[8:9], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[6:7], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB85_4
+; GFX9-NEXT: s_cbranch_execnz .LBB85_3
; GFX9-NEXT: .LBB85_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[11:12], s[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[1:2], s[28:29], 1.0
@@ -53072,58 +51763,8 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; GFX9-NEXT: v_lshrrev_b32_e32 v18, 8, v16
; GFX9-NEXT: v_lshrrev_b32_e32 v57, 16, v15
; GFX9-NEXT: v_lshrrev_b32_e32 v59, 8, v15
-; GFX9-NEXT: s_branch .LBB85_5
+; GFX9-NEXT: s_branch .LBB85_4
; GFX9-NEXT: .LBB85_3:
-; GFX9-NEXT: ; implicit-def: $sgpr54
-; GFX9-NEXT: ; implicit-def: $sgpr55
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr52
-; GFX9-NEXT: ; implicit-def: $sgpr53
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr50
-; GFX9-NEXT: ; implicit-def: $sgpr51
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr48
-; GFX9-NEXT: ; implicit-def: $sgpr49
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr38
-; GFX9-NEXT: ; implicit-def: $sgpr39
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr36
-; GFX9-NEXT: ; implicit-def: $sgpr37
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr35
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr31
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: s_branch .LBB85_2
-; GFX9-NEXT: .LBB85_4:
; GFX9-NEXT: v_mov_b32_e32 v26, s31
; GFX9-NEXT: v_mov_b32_e32 v25, s40
; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
@@ -53197,7 +51838,7 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; GFX9-NEXT: .LBB85_5: ; %end
+; GFX9-NEXT: .LBB85_4: ; %end
; GFX9-NEXT: s_mov_b32 s4, 0xc0c0004
; GFX9-NEXT: v_perm_b32 v24, v57, v24, s4
; GFX9-NEXT: v_perm_b32 v15, v15, v59, s4
@@ -53345,7 +51986,7 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; GFX11-NEXT: v_writelane_b32 v40, s39, 7
; GFX11-NEXT: v_writelane_b32 v40, s48, 8
; GFX11-NEXT: v_writelane_b32 v40, s49, 9
-; GFX11-NEXT: s_cbranch_scc0 .LBB85_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB85_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s90, s27, 24
; GFX11-NEXT: s_lshr_b32 s89, s27, 16
@@ -53396,7 +52037,7 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
-; GFX11-NEXT: s_cbranch_vccnz .LBB85_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB85_3
; GFX11-NEXT: .LBB85_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[7:8], s[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[11:12], s[18:19], 1.0
@@ -53454,58 +52095,8 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v86, 8, v22
; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v21
; GFX11-NEXT: v_lshrrev_b32_e32 v81, 8, v21
-; GFX11-NEXT: s_branch .LBB85_5
+; GFX11-NEXT: s_branch .LBB85_4
; GFX11-NEXT: .LBB85_3:
-; GFX11-NEXT: ; implicit-def: $sgpr48
-; GFX11-NEXT: ; implicit-def: $sgpr49
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr37
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $sgpr35
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $sgpr31
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr95
-; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr75
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr93
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr77
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr79
-; GFX11-NEXT: ; implicit-def: $sgpr91
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr89
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: s_branch .LBB85_2
-; GFX11-NEXT: .LBB85_4:
; GFX11-NEXT: v_dual_mov_b32 v21, s0 :: v_dual_mov_b32 v2, s27
; GFX11-NEXT: v_dual_mov_b32 v17, s2 :: v_dual_mov_b32 v4, s25
; GFX11-NEXT: v_dual_mov_b32 v15, s16 :: v_dual_mov_b32 v6, s23
@@ -53538,7 +52129,7 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v84, s56 :: v_dual_mov_b32 v85, s47
; GFX11-NEXT: v_dual_mov_b32 v83, s46 :: v_dual_mov_b32 v96, s44
; GFX11-NEXT: v_dual_mov_b32 v87, s45 :: v_dual_mov_b32 v86, s43
-; GFX11-NEXT: .LBB85_5: ; %end
+; GFX11-NEXT: .LBB85_4: ; %end
; GFX11-NEXT: v_perm_b32 v26, v69, v26, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_perm_b32 v27, v82, v27, 0xc0c0004
@@ -55991,7 +54582,7 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v37
; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v27, 8, v27
-; SI-NEXT: s_cbranch_scc0 .LBB87_4
+; SI-NEXT: s_cbranch_scc0 .LBB87_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s6, 0xff
; SI-NEXT: v_or_b32_e32 v0, s4, v34
@@ -56355,9 +54946,6 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: .LBB87_3: ; %end
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB87_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB87_2
;
; VI-LABEL: bitcast_v64i8_to_v8f64_scalar:
; VI: ; %bb.0:
@@ -56416,7 +55004,7 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v35
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB87_4
+; VI-NEXT: s_cbranch_scc0 .LBB87_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -56666,9 +55254,6 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: .LBB87_3: ; %end
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB87_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB87_2
;
; GFX9-LABEL: bitcast_v64i8_to_v8f64_scalar:
; GFX9: ; %bb.0:
@@ -56727,7 +55312,7 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: s_waitcnt vmcnt(19)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v35
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB87_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB87_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -56966,9 +55551,6 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: .LBB87_3: ; %end
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB87_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB87_2
;
; GFX11-TRUE16-LABEL: bitcast_v64i8_to_v8f64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -57025,7 +55607,7 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v54
; GFX11-TRUE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB87_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB87_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; GFX11-TRUE16-NEXT: v_perm_b32 v16, v37, v34, 0xc0c0004
@@ -57250,9 +55832,6 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: .LBB87_3: ; %end
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB87_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB87_2
;
; GFX11-FAKE16-LABEL: bitcast_v64i8_to_v8f64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -57309,7 +55888,7 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v54
; GFX11-FAKE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB87_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB87_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; GFX11-FAKE16-NEXT: v_perm_b32 v16, v37, v34, 0xc0c0004
@@ -57534,9 +56113,6 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: .LBB87_3: ; %end
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB87_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB87_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -58038,7 +56614,7 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; SI-NEXT: s_lshr_b32 s38, s39, 16
; SI-NEXT: v_readfirstlane_b32 s4, v2
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB89_4
+; SI-NEXT: s_cbranch_scc0 .LBB89_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s5, s17, 0xffff
; SI-NEXT: s_lshl_b32 s7, s78, 16
@@ -58280,24 +56856,6 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB89_4:
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: s_branch .LBB89_2
;
; VI-LABEL: bitcast_v32i16_to_v32f16_scalar:
; VI: ; %bb.0:
@@ -58306,7 +56864,7 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB89_4
+; VI-NEXT: s_cbranch_scc0 .LBB89_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB89_3
; VI-NEXT: .LBB89_2: ; %cmp.true
@@ -58408,8 +56966,6 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v14, s7
; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB89_4:
-; VI-NEXT: s_branch .LBB89_2
;
; GFX9-LABEL: bitcast_v32i16_to_v32f16_scalar:
; GFX9: ; %bb.0:
@@ -58423,9 +56979,9 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB89_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB89_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB89_4
+; GFX9-NEXT: s_cbranch_execnz .LBB89_3
; GFX9-NEXT: .LBB89_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, s31, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, s30, 3 op_sel_hi:[1,0]
@@ -58443,10 +56999,8 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB89_5
+; GFX9-NEXT: s_branch .LBB89_4
; GFX9-NEXT: .LBB89_3:
-; GFX9-NEXT: s_branch .LBB89_2
-; GFX9-NEXT: .LBB89_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -58463,7 +57017,7 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB89_5: ; %end
+; GFX9-NEXT: .LBB89_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -58481,10 +57035,10 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB89_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB89_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB89_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB89_3
; GFX11-NEXT: .LBB89_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
@@ -58504,8 +57058,6 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB89_3:
-; GFX11-NEXT: s_branch .LBB89_2
-; GFX11-NEXT: .LBB89_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -58908,9 +57460,9 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s15, s6, 16
; SI-NEXT: v_readfirstlane_b32 s4, v2
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB91_3
+; SI-NEXT: s_cbranch_scc0 .LBB91_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB91_4
+; SI-NEXT: s_cbranch_execnz .LBB91_3
; SI-NEXT: .LBB91_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
; SI-NEXT: v_cvt_f32_f16_e32 v2, s46
@@ -59048,10 +57600,8 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; SI-NEXT: v_lshr_b64 v[20:21], v[10:11], 16
; SI-NEXT: v_lshr_b64 v[18:19], v[12:13], 16
; SI-NEXT: v_lshr_b64 v[16:17], v[14:15], 16
-; SI-NEXT: s_branch .LBB91_5
+; SI-NEXT: s_branch .LBB91_4
; SI-NEXT: .LBB91_3:
-; SI-NEXT: s_branch .LBB91_2
-; SI-NEXT: .LBB91_4:
; SI-NEXT: v_mov_b32_e32 v32, s44
; SI-NEXT: v_mov_b32_e32 v38, s41
; SI-NEXT: v_mov_b32_e32 v33, s42
@@ -59084,7 +57634,7 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v20, s14
; SI-NEXT: v_mov_b32_e32 v18, s13
; SI-NEXT: v_mov_b32_e32 v16, s15
-; SI-NEXT: .LBB91_5: ; %end
+; SI-NEXT: .LBB91_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v30
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v55
; SI-NEXT: v_or_b32_e32 v0, v2, v0
@@ -59147,9 +57697,9 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB91_3
+; VI-NEXT: s_cbranch_scc0 .LBB91_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB91_4
+; VI-NEXT: s_cbranch_execnz .LBB91_3
; VI-NEXT: .LBB91_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s5, s30, 16
; VI-NEXT: v_mov_b32_e32 v1, 0x200
@@ -59232,10 +57782,8 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v1, s17, v1
; VI-NEXT: v_or_b32_e32 v1, v1, v17
; VI-NEXT: v_or_b32_e32 v0, v0, v16
-; VI-NEXT: s_branch .LBB91_5
+; VI-NEXT: s_branch .LBB91_4
; VI-NEXT: .LBB91_3:
-; VI-NEXT: s_branch .LBB91_2
-; VI-NEXT: .LBB91_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -59252,7 +57800,7 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB91_5: ; %end
+; VI-NEXT: .LBB91_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v18, 1
; VI-NEXT: v_readlane_b32 s30, v18, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -59273,9 +57821,9 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB91_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB91_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB91_4
+; GFX9-NEXT: s_cbranch_execnz .LBB91_3
; GFX9-NEXT: .LBB91_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, s31, v0 op_sel_hi:[1,0]
@@ -59294,10 +57842,8 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB91_5
+; GFX9-NEXT: s_branch .LBB91_4
; GFX9-NEXT: .LBB91_3:
-; GFX9-NEXT: s_branch .LBB91_2
-; GFX9-NEXT: .LBB91_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -59314,7 +57860,7 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB91_5: ; %end
+; GFX9-NEXT: .LBB91_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -59332,10 +57878,10 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB91_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB91_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB91_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB91_3
; GFX11-NEXT: .LBB91_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
@@ -59355,8 +57901,6 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB91_3:
-; GFX11-NEXT: s_branch .LBB91_2
-; GFX11-NEXT: .LBB91_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -59814,7 +58358,7 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; SI-NEXT: s_lshr_b32 s36, s37, 16
; SI-NEXT: v_readfirstlane_b32 s4, v2
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB93_4
+; SI-NEXT: s_cbranch_scc0 .LBB93_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s73, s16, 16
; SI-NEXT: s_lshl_b32 s77, s78, 16
@@ -60040,40 +58584,6 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB93_4:
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr43
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB93_2
;
; VI-LABEL: bitcast_v32i16_to_v32bf16_scalar:
; VI: ; %bb.0:
@@ -60082,7 +58592,7 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; VI-NEXT: v_readfirstlane_b32 s6, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_cbranch_scc0 .LBB93_4
+; VI-NEXT: s_cbranch_scc0 .LBB93_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB93_3
; VI-NEXT: .LBB93_2: ; %cmp.true
@@ -60184,8 +58694,6 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; VI-NEXT: v_mov_b32_e32 v14, s7
; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB93_4:
-; VI-NEXT: s_branch .LBB93_2
;
; GFX9-LABEL: bitcast_v32i16_to_v32bf16_scalar:
; GFX9: ; %bb.0:
@@ -60199,9 +58707,9 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB93_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB93_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB93_4
+; GFX9-NEXT: s_cbranch_execnz .LBB93_3
; GFX9-NEXT: .LBB93_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, s31, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, s30, 3 op_sel_hi:[1,0]
@@ -60219,10 +58727,8 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB93_5
+; GFX9-NEXT: s_branch .LBB93_4
; GFX9-NEXT: .LBB93_3:
-; GFX9-NEXT: s_branch .LBB93_2
-; GFX9-NEXT: .LBB93_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -60239,7 +58745,7 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB93_5: ; %end
+; GFX9-NEXT: .LBB93_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -60257,10 +58763,10 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB93_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB93_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB93_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB93_3
; GFX11-NEXT: .LBB93_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
@@ -60280,8 +58786,6 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB93_3:
-; GFX11-NEXT: s_branch .LBB93_2
-; GFX11-NEXT: .LBB93_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -61871,7 +60375,7 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; SI-NEXT: v_mul_f32_e64 v28, 1.0, s45
; SI-NEXT: v_mul_f32_e64 v55, 1.0, s42
; SI-NEXT: v_mul_f32_e64 v16, 1.0, s43
-; SI-NEXT: s_cbranch_scc0 .LBB95_4
+; SI-NEXT: s_cbranch_scc0 .LBB95_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v63
; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v6
@@ -62086,40 +60590,6 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; SI-NEXT: v_or_b32_e32 v15, v15, v16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB95_4:
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr52
-; SI-NEXT: ; implicit-def: $vgpr51
-; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: ; implicit-def: $vgpr53
-; SI-NEXT: ; implicit-def: $vgpr49
-; SI-NEXT: ; implicit-def: $vgpr13
-; SI-NEXT: ; implicit-def: $vgpr54
-; SI-NEXT: ; implicit-def: $vgpr38
-; SI-NEXT: ; implicit-def: $vgpr23
-; SI-NEXT: ; implicit-def: $vgpr39
-; SI-NEXT: ; implicit-def: $vgpr35
-; SI-NEXT: ; implicit-def: $vgpr24
-; SI-NEXT: ; implicit-def: $vgpr40
-; SI-NEXT: ; implicit-def: $vgpr36
-; SI-NEXT: ; implicit-def: $vgpr25
-; SI-NEXT: ; implicit-def: $vgpr41
-; SI-NEXT: ; implicit-def: $vgpr33
-; SI-NEXT: ; implicit-def: $vgpr26
-; SI-NEXT: ; implicit-def: $vgpr42
-; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: ; implicit-def: $vgpr27
-; SI-NEXT: ; implicit-def: $vgpr43
-; SI-NEXT: ; implicit-def: $vgpr29
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: ; implicit-def: $vgpr21
-; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: ; implicit-def: $vgpr19
-; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: ; implicit-def: $vgpr17
-; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: s_branch .LBB95_2
;
; VI-LABEL: bitcast_v32bf16_to_v32i16_scalar:
; VI: ; %bb.0:
@@ -62133,9 +60603,9 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB95_3
+; VI-NEXT: s_cbranch_scc0 .LBB95_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB95_4
+; VI-NEXT: s_cbranch_execnz .LBB95_3
; VI-NEXT: .LBB95_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v16, 0x40c00000
; VI-NEXT: s_lshl_b32 s4, s26, 16
@@ -62433,10 +60903,8 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; VI-NEXT: v_lshrrev_b64 v[17:18], 16, v[17:18]
; VI-NEXT: v_lshrrev_b64 v[15:16], 16, v[15:16]
; VI-NEXT: v_mov_b32_e32 v13, v17
-; VI-NEXT: s_branch .LBB95_5
+; VI-NEXT: s_branch .LBB95_4
; VI-NEXT: .LBB95_3:
-; VI-NEXT: s_branch .LBB95_2
-; VI-NEXT: .LBB95_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -62453,7 +60921,7 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB95_5: ; %end
+; VI-NEXT: .LBB95_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v20, 1
; VI-NEXT: v_readlane_b32 s30, v20, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -62474,9 +60942,9 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB95_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB95_4
+; GFX9-NEXT: s_cbranch_execnz .LBB95_3
; GFX9-NEXT: .LBB95_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: s_and_b32 s5, s30, 0xffff0000
@@ -62768,10 +61236,8 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; GFX9-NEXT: v_cndmask_b32_e32 v0, v18, v19, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GFX9-NEXT: v_and_or_b32 v0, v17, v16, v0
-; GFX9-NEXT: s_branch .LBB95_5
+; GFX9-NEXT: s_branch .LBB95_4
; GFX9-NEXT: .LBB95_3:
-; GFX9-NEXT: s_branch .LBB95_2
-; GFX9-NEXT: .LBB95_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -62788,7 +61254,7 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB95_5: ; %end
+; GFX9-NEXT: .LBB95_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v20, 1
; GFX9-NEXT: v_readlane_b32 s30, v20, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -62806,10 +61272,10 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB95_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB95_3
; GFX11-TRUE16-NEXT: .LBB95_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s0, s12, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s12, 16
@@ -63080,8 +61546,6 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v33.h
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB95_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB95_2
-; GFX11-TRUE16-NEXT: .LBB95_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -63101,10 +61565,10 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB95_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB95_3
; GFX11-FAKE16-NEXT: .LBB95_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_and_b32 s0, s12, 0xffff0000
; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s12, 16
@@ -63400,8 +61864,6 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v24
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB95_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB95_2
-; GFX11-FAKE16-NEXT: .LBB95_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -66049,7 +64511,7 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s4, v1
; VI-NEXT: v_writelane_b32 v4, s67, 19
-; VI-NEXT: s_cbranch_scc0 .LBB97_4
+; VI-NEXT: s_cbranch_scc0 .LBB97_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s56, s5, 24
; VI-NEXT: s_lshr_b32 s57, s5, 16
@@ -66383,56 +64845,6 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB97_4:
-; VI-NEXT: ; implicit-def: $sgpr67
-; VI-NEXT: ; implicit-def: $sgpr66
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr65
-; VI-NEXT: ; implicit-def: $sgpr64
-; VI-NEXT: ; implicit-def: $sgpr55
-; VI-NEXT: ; implicit-def: $sgpr54
-; VI-NEXT: ; implicit-def: $sgpr53
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr52
-; VI-NEXT: ; implicit-def: $sgpr51
-; VI-NEXT: ; implicit-def: $sgpr50
-; VI-NEXT: ; implicit-def: $sgpr49
-; VI-NEXT: ; implicit-def: $sgpr48
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr39
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr37
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: s_branch .LBB97_2
;
; GFX9-LABEL: bitcast_v32i16_to_v64i8_scalar:
; GFX9: ; %bb.0:
@@ -66475,7 +64887,7 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB97_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB97_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s56, s5, 24
; GFX9-NEXT: s_lshr_b32 s57, s5, 16
@@ -66525,7 +64937,7 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; GFX9-NEXT: s_lshr_b64 s[10:11], s[20:21], 24
; GFX9-NEXT: s_lshr_b64 s[8:9], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[6:7], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB97_4
+; GFX9-NEXT: s_cbranch_execnz .LBB97_3
; GFX9-NEXT: .LBB97_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v6, s27, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v5, s26, 3 op_sel_hi:[1,0]
@@ -66597,58 +65009,8 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; GFX9-NEXT: v_lshrrev_b32_e32 v62, 8, v19
; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v18
; GFX9-NEXT: v_lshrrev_b32_e32 v15, 8, v18
-; GFX9-NEXT: s_branch .LBB97_5
+; GFX9-NEXT: s_branch .LBB97_4
; GFX9-NEXT: .LBB97_3:
-; GFX9-NEXT: ; implicit-def: $sgpr55
-; GFX9-NEXT: ; implicit-def: $sgpr53
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr54
-; GFX9-NEXT: ; implicit-def: $sgpr52
-; GFX9-NEXT: ; implicit-def: $sgpr51
-; GFX9-NEXT: ; implicit-def: $sgpr50
-; GFX9-NEXT: ; implicit-def: $sgpr48
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr49
-; GFX9-NEXT: ; implicit-def: $sgpr39
-; GFX9-NEXT: ; implicit-def: $sgpr38
-; GFX9-NEXT: ; implicit-def: $sgpr37
-; GFX9-NEXT: ; implicit-def: $sgpr35
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr36
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr31
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: s_branch .LBB97_2
-; GFX9-NEXT: .LBB97_4:
; GFX9-NEXT: v_mov_b32_e32 v20, s44
; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
@@ -66719,7 +65081,7 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v20, s40
; GFX9-NEXT: v_mov_b32_e32 v21, s14
-; GFX9-NEXT: .LBB97_5: ; %end
+; GFX9-NEXT: .LBB97_4: ; %end
; GFX9-NEXT: s_mov_b32 s4, 0xc0c0004
; GFX9-NEXT: v_perm_b32 v16, v16, v25, s4
; GFX9-NEXT: v_perm_b32 v15, v18, v15, s4
@@ -66864,7 +65226,7 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; GFX11-NEXT: v_writelane_b32 v40, s39, 7
; GFX11-NEXT: v_writelane_b32 v40, s48, 8
; GFX11-NEXT: v_writelane_b32 v40, s49, 9
-; GFX11-NEXT: s_cbranch_scc0 .LBB97_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB97_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s43, s27, 24
; GFX11-NEXT: s_lshr_b32 s44, s27, 16
@@ -66915,7 +65277,7 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
-; GFX11-NEXT: s_cbranch_vccnz .LBB97_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB97_3
; GFX11-NEXT: .LBB97_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v4, s25, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v3, s24, 3 op_sel_hi:[1,0]
@@ -66981,58 +65343,8 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v83, 8, v18
; GFX11-NEXT: v_lshrrev_b32_e32 v96, 16, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v85, 8, v17
-; GFX11-NEXT: s_branch .LBB97_5
+; GFX11-NEXT: s_branch .LBB97_4
; GFX11-NEXT: .LBB97_3:
-; GFX11-NEXT: ; implicit-def: $sgpr49
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr48
-; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr37
-; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr35
-; GFX11-NEXT: ; implicit-def: $sgpr31
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr95
-; GFX11-NEXT: ; implicit-def: $sgpr93
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr91
-; GFX11-NEXT: ; implicit-def: $sgpr89
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr79
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr77
-; GFX11-NEXT: ; implicit-def: $sgpr75
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: s_branch .LBB97_2
-; GFX11-NEXT: .LBB97_4:
; GFX11-NEXT: v_dual_mov_b32 v17, s0 :: v_dual_mov_b32 v18, s1
; GFX11-NEXT: v_dual_mov_b32 v13, s2 :: v_dual_mov_b32 v14, s3
; GFX11-NEXT: v_dual_mov_b32 v11, s16 :: v_dual_mov_b32 v12, s17
@@ -67065,7 +65377,7 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v20, s14 :: v_dual_mov_b32 v21, s12
; GFX11-NEXT: v_dual_mov_b32 v22, s10 :: v_dual_mov_b32 v23, s8
; GFX11-NEXT: v_dual_mov_b32 v24, s6 :: v_dual_mov_b32 v25, s4
-; GFX11-NEXT: .LBB97_5: ; %end
+; GFX11-NEXT: .LBB97_4: ; %end
; GFX11-NEXT: v_perm_b32 v86, v86, v87, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_perm_b32 v24, v84, v24, 0xc0c0004
@@ -70481,7 +68793,7 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v35
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB99_4
+; VI-NEXT: s_cbranch_scc0 .LBB99_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -70737,9 +69049,6 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: .LBB99_3: ; %end
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB99_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB99_2
;
; GFX9-LABEL: bitcast_v64i8_to_v32i16_scalar:
; GFX9: ; %bb.0:
@@ -70798,7 +69107,7 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: s_waitcnt vmcnt(19)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB99_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB99_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v0, s17
; GFX9-NEXT: v_mov_b32_e32 v8, 0xc0c0004
@@ -71065,9 +69374,6 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: .LBB99_3: ; %end
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB99_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB99_2
;
; GFX11-TRUE16-LABEL: bitcast_v64i8_to_v32i16_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -71124,7 +69430,7 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v54
; GFX11-TRUE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB99_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB99_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, 0xc0c0004 :: v_dual_lshlrev_b32 v13, 8, v34
@@ -71328,9 +69634,6 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: .LBB99_3: ; %end
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB99_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB99_2
;
; GFX11-FAKE16-LABEL: bitcast_v64i8_to_v32i16_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -71387,7 +69690,7 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v54
; GFX11-FAKE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB99_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB99_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v8, 0xc0c0004
; GFX11-FAKE16-NEXT: s_and_b32 s76, s58, 0xff
@@ -71618,9 +69921,6 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: .LBB99_3: ; %end
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB99_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB99_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -72167,7 +70467,7 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; SI-NEXT: v_writelane_b32 v40, s38, 6
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v40, s39, 7
-; SI-NEXT: s_cbranch_scc0 .LBB101_3
+; SI-NEXT: s_cbranch_scc0 .LBB101_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s44, s16, 16
; SI-NEXT: s_lshl_b32 s45, s6, 16
@@ -72201,7 +70501,7 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; SI-NEXT: s_lshl_b32 s37, s59, 16
; SI-NEXT: s_lshl_b32 s38, s60, 16
; SI-NEXT: s_lshl_b32 s39, s63, 16
-; SI-NEXT: s_cbranch_execnz .LBB101_4
+; SI-NEXT: s_cbranch_execnz .LBB101_3
; SI-NEXT: .LBB101_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s63
; SI-NEXT: v_cvt_f32_f16_e32 v1, s60
@@ -72331,42 +70631,8 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: s_branch .LBB101_5
+; SI-NEXT: s_branch .LBB101_4
; SI-NEXT: .LBB101_3:
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr47
-; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: s_branch .LBB101_2
-; SI-NEXT: .LBB101_4:
; SI-NEXT: v_mov_b32_e32 v1, s39
; SI-NEXT: v_mov_b32_e32 v0, s38
; SI-NEXT: v_mov_b32_e32 v3, s37
@@ -72399,7 +70665,7 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; SI-NEXT: v_mov_b32_e32 v28, s46
; SI-NEXT: v_mov_b32_e32 v31, s45
; SI-NEXT: v_mov_b32_e32 v30, s44
-; SI-NEXT: .LBB101_5: ; %end
+; SI-NEXT: .LBB101_4: ; %end
; SI-NEXT: v_mul_f32_e32 v31, 1.0, v31
; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v31
; SI-NEXT: v_mul_f32_e32 v30, 1.0, v30
@@ -72490,9 +70756,9 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB101_3
+; VI-NEXT: s_cbranch_scc0 .LBB101_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB101_4
+; VI-NEXT: s_cbranch_execnz .LBB101_3
; VI-NEXT: .LBB101_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s5, s30, 16
; VI-NEXT: v_mov_b32_e32 v1, 0x200
@@ -72575,10 +70841,8 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; VI-NEXT: v_add_f16_e32 v1, s17, v1
; VI-NEXT: v_or_b32_e32 v1, v1, v17
; VI-NEXT: v_or_b32_e32 v0, v0, v16
-; VI-NEXT: s_branch .LBB101_5
+; VI-NEXT: s_branch .LBB101_4
; VI-NEXT: .LBB101_3:
-; VI-NEXT: s_branch .LBB101_2
-; VI-NEXT: .LBB101_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -72595,7 +70859,7 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB101_5: ; %end
+; VI-NEXT: .LBB101_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v18, 1
; VI-NEXT: v_readlane_b32 s30, v18, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -72616,9 +70880,9 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB101_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB101_4
+; GFX9-NEXT: s_cbranch_execnz .LBB101_3
; GFX9-NEXT: .LBB101_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, s31, v0 op_sel_hi:[1,0]
@@ -72637,10 +70901,8 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB101_5
+; GFX9-NEXT: s_branch .LBB101_4
; GFX9-NEXT: .LBB101_3:
-; GFX9-NEXT: s_branch .LBB101_2
-; GFX9-NEXT: .LBB101_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -72657,7 +70919,7 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB101_5: ; %end
+; GFX9-NEXT: .LBB101_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v16, 1
; GFX9-NEXT: v_readlane_b32 s30, v16, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -72675,10 +70937,10 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB101_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB101_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB101_3
; GFX11-NEXT: .LBB101_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
@@ -72698,8 +70960,6 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB101_3:
-; GFX11-NEXT: s_branch .LBB101_2
-; GFX11-NEXT: .LBB101_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -74817,9 +73077,9 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; VI-NEXT: v_readfirstlane_b32 s31, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_cbranch_scc0 .LBB103_3
+; VI-NEXT: s_cbranch_scc0 .LBB103_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB103_4
+; VI-NEXT: s_cbranch_execnz .LBB103_3
; VI-NEXT: .LBB103_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v16, 0x40c00000
; VI-NEXT: s_lshl_b32 s4, s26, 16
@@ -75117,10 +73377,8 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; VI-NEXT: v_lshrrev_b64 v[17:18], 16, v[17:18]
; VI-NEXT: v_lshrrev_b64 v[15:16], 16, v[15:16]
; VI-NEXT: v_mov_b32_e32 v13, v17
-; VI-NEXT: s_branch .LBB103_5
+; VI-NEXT: s_branch .LBB103_4
; VI-NEXT: .LBB103_3:
-; VI-NEXT: s_branch .LBB103_2
-; VI-NEXT: .LBB103_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -75137,7 +73395,7 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: v_mov_b32_e32 v14, s30
; VI-NEXT: v_mov_b32_e32 v15, s31
-; VI-NEXT: .LBB103_5: ; %end
+; VI-NEXT: .LBB103_4: ; %end
; VI-NEXT: v_readlane_b32 s31, v20, 1
; VI-NEXT: v_readlane_b32 s30, v20, 0
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -75158,9 +73416,9 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB103_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB103_4
+; GFX9-NEXT: s_cbranch_execnz .LBB103_3
; GFX9-NEXT: .LBB103_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: s_and_b32 s5, s30, 0xffff0000
@@ -75468,10 +73726,8 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; GFX9-NEXT: v_cndmask_b32_e32 v17, v18, v19, vcc
; GFX9-NEXT: v_and_b32_sdwa v16, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v16
-; GFX9-NEXT: s_branch .LBB103_5
+; GFX9-NEXT: s_branch .LBB103_4
; GFX9-NEXT: .LBB103_3:
-; GFX9-NEXT: s_branch .LBB103_2
-; GFX9-NEXT: .LBB103_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -75488,7 +73744,7 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v14, s30
; GFX9-NEXT: v_mov_b32_e32 v15, s31
-; GFX9-NEXT: .LBB103_5: ; %end
+; GFX9-NEXT: .LBB103_4: ; %end
; GFX9-NEXT: v_readlane_b32 s31, v20, 1
; GFX9-NEXT: v_readlane_b32 s30, v20, 0
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
@@ -75506,10 +73762,10 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB103_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB103_3
; GFX11-TRUE16-NEXT: .LBB103_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_and_b32 s0, s12, 0xffff0000
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s12, 16
@@ -75814,8 +74070,6 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v31.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB103_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB103_2
-; GFX11-TRUE16-NEXT: .LBB103_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -75835,10 +74089,10 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s28, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB103_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB103_3
; GFX11-FAKE16-NEXT: .LBB103_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_and_b32 s0, s12, 0xffff0000
; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s12, 16
@@ -76167,8 +74421,6 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v23
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB103_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB103_2
-; GFX11-FAKE16-NEXT: .LBB103_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -78764,7 +77016,7 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill
-; VI-NEXT: s_cbranch_scc0 .LBB105_3
+; VI-NEXT: s_cbranch_scc0 .LBB105_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s88, s5, 24
; VI-NEXT: s_lshr_b32 s36, s5, 16
@@ -78814,7 +77066,7 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; VI-NEXT: s_lshr_b64 s[10:11], s[20:21], 24
; VI-NEXT: s_lshr_b64 s[8:9], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[6:7], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB105_4
+; VI-NEXT: s_cbranch_execnz .LBB105_3
; VI-NEXT: .LBB105_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s6, s17, 16
; VI-NEXT: v_mov_b32_e32 v1, 0x200
@@ -78933,58 +77185,8 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; VI-NEXT: v_bfe_u32 v56, v13, 8, 8
; VI-NEXT: v_bfe_u32 v59, v12, 8, 8
; VI-NEXT: v_bfe_u32 v62, v11, 8, 8
-; VI-NEXT: s_branch .LBB105_5
+; VI-NEXT: s_branch .LBB105_4
; VI-NEXT: .LBB105_3:
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr67
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr66
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr65
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr64
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr55
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr54
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr53
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr52
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr51
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr50
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr49
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr48
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr39
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr37
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: s_branch .LBB105_2
-; VI-NEXT: .LBB105_4:
; VI-NEXT: v_mov_b32_e32 v2, s44
; VI-NEXT: v_mov_b32_e32 v1, s79
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
@@ -79052,7 +77254,7 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s10
; VI-NEXT: v_mov_b32_e32 v7, s8
; VI-NEXT: v_mov_b32_e32 v8, s6
-; VI-NEXT: .LBB105_5: ; %end
+; VI-NEXT: .LBB105_4: ; %end
; VI-NEXT: s_mov_b32 s4, 0xc0c0004
; VI-NEXT: v_perm_b32 v8, v18, v8, s4
; VI-NEXT: v_perm_b32 v1, v33, v1, s4
@@ -79235,7 +77437,7 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB105_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB105_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s56, s5, 24
; GFX9-NEXT: s_lshr_b32 s57, s5, 16
@@ -79285,7 +77487,7 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; GFX9-NEXT: s_lshr_b64 s[10:11], s[20:21], 24
; GFX9-NEXT: s_lshr_b64 s[8:9], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[6:7], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB105_4
+; GFX9-NEXT: s_cbranch_execnz .LBB105_3
; GFX9-NEXT: .LBB105_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v1, 0x200
; GFX9-NEXT: v_pk_add_f16 v19, s17, v1 op_sel_hi:[1,0]
@@ -79358,58 +77560,8 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; GFX9-NEXT: v_lshrrev_b32_e32 v62, 8, v19
; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v18
; GFX9-NEXT: v_lshrrev_b32_e32 v15, 8, v18
-; GFX9-NEXT: s_branch .LBB105_5
+; GFX9-NEXT: s_branch .LBB105_4
; GFX9-NEXT: .LBB105_3:
-; GFX9-NEXT: ; implicit-def: $sgpr55
-; GFX9-NEXT: ; implicit-def: $sgpr53
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr54
-; GFX9-NEXT: ; implicit-def: $sgpr52
-; GFX9-NEXT: ; implicit-def: $sgpr51
-; GFX9-NEXT: ; implicit-def: $sgpr50
-; GFX9-NEXT: ; implicit-def: $sgpr48
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr49
-; GFX9-NEXT: ; implicit-def: $sgpr39
-; GFX9-NEXT: ; implicit-def: $sgpr38
-; GFX9-NEXT: ; implicit-def: $sgpr37
-; GFX9-NEXT: ; implicit-def: $sgpr35
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr36
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr31
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: s_branch .LBB105_2
-; GFX9-NEXT: .LBB105_4:
; GFX9-NEXT: v_mov_b32_e32 v20, s44
; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
@@ -79480,7 +77632,7 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v20, s40
; GFX9-NEXT: v_mov_b32_e32 v21, s14
-; GFX9-NEXT: .LBB105_5: ; %end
+; GFX9-NEXT: .LBB105_4: ; %end
; GFX9-NEXT: s_mov_b32 s4, 0xc0c0004
; GFX9-NEXT: v_perm_b32 v16, v16, v25, s4
; GFX9-NEXT: v_perm_b32 v15, v18, v15, s4
@@ -79625,7 +77777,7 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; GFX11-NEXT: v_writelane_b32 v40, s39, 7
; GFX11-NEXT: v_writelane_b32 v40, s48, 8
; GFX11-NEXT: v_writelane_b32 v40, s49, 9
-; GFX11-NEXT: s_cbranch_scc0 .LBB105_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB105_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s43, s27, 24
; GFX11-NEXT: s_lshr_b32 s44, s27, 16
@@ -79676,7 +77828,7 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
-; GFX11-NEXT: s_cbranch_vccnz .LBB105_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB105_3
; GFX11-NEXT: .LBB105_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s25 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s24 op_sel_hi:[0,1]
@@ -79742,58 +77894,8 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v83, 8, v18
; GFX11-NEXT: v_lshrrev_b32_e32 v96, 16, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v85, 8, v17
-; GFX11-NEXT: s_branch .LBB105_5
+; GFX11-NEXT: s_branch .LBB105_4
; GFX11-NEXT: .LBB105_3:
-; GFX11-NEXT: ; implicit-def: $sgpr49
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr48
-; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr37
-; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr35
-; GFX11-NEXT: ; implicit-def: $sgpr31
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr95
-; GFX11-NEXT: ; implicit-def: $sgpr93
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr91
-; GFX11-NEXT: ; implicit-def: $sgpr89
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr79
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr77
-; GFX11-NEXT: ; implicit-def: $sgpr75
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: s_branch .LBB105_2
-; GFX11-NEXT: .LBB105_4:
; GFX11-NEXT: v_dual_mov_b32 v17, s0 :: v_dual_mov_b32 v18, s1
; GFX11-NEXT: v_dual_mov_b32 v13, s2 :: v_dual_mov_b32 v14, s3
; GFX11-NEXT: v_dual_mov_b32 v11, s16 :: v_dual_mov_b32 v12, s17
@@ -79826,7 +77928,7 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v20, s14 :: v_dual_mov_b32 v21, s12
; GFX11-NEXT: v_dual_mov_b32 v22, s10 :: v_dual_mov_b32 v23, s8
; GFX11-NEXT: v_dual_mov_b32 v24, s6 :: v_dual_mov_b32 v25, s4
-; GFX11-NEXT: .LBB105_5: ; %end
+; GFX11-NEXT: .LBB105_4: ; %end
; GFX11-NEXT: v_perm_b32 v86, v86, v87, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_perm_b32 v24, v84, v24, 0xc0c0004
@@ -83242,7 +81344,7 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v35
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB107_4
+; VI-NEXT: s_cbranch_scc0 .LBB107_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -83498,9 +81600,6 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: .LBB107_3: ; %end
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB107_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB107_2
;
; GFX9-LABEL: bitcast_v64i8_to_v32f16_scalar:
; GFX9: ; %bb.0:
@@ -83559,7 +81658,7 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: s_waitcnt vmcnt(19)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB107_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB107_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v0, s17
; GFX9-NEXT: v_mov_b32_e32 v8, 0xc0c0004
@@ -83826,9 +81925,6 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: .LBB107_3: ; %end
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB107_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB107_2
;
; GFX11-TRUE16-LABEL: bitcast_v64i8_to_v32f16_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -83885,7 +81981,7 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v54
; GFX11-TRUE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB107_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB107_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, 0xc0c0004 :: v_dual_lshlrev_b32 v13, 8, v34
@@ -84089,9 +82185,6 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: .LBB107_3: ; %end
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB107_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB107_2
;
; GFX11-FAKE16-LABEL: bitcast_v64i8_to_v32f16_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -84148,7 +82241,7 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v54
; GFX11-FAKE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB107_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB107_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v8, 0xc0c0004
; GFX11-FAKE16-NEXT: s_and_b32 s76, s58, 0xff
@@ -84379,9 +82472,6 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: .LBB107_3: ; %end
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB107_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB107_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -88143,7 +86233,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill
-; VI-NEXT: s_cbranch_scc0 .LBB109_3
+; VI-NEXT: s_cbranch_scc0 .LBB109_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s65, s5, 24
; VI-NEXT: s_lshr_b32 s64, s5, 16
@@ -88193,7 +86283,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; VI-NEXT: s_lshr_b64 s[10:11], s[20:21], 24
; VI-NEXT: s_lshr_b64 s[8:9], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[6:7], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB109_4
+; VI-NEXT: s_cbranch_execnz .LBB109_3
; VI-NEXT: .LBB109_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s6, s17, 16
; VI-NEXT: v_mov_b32_e32 v24, 0x40c00000
@@ -88542,58 +86632,8 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v62, 8, v1
; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v6, 8, v2
-; VI-NEXT: s_branch .LBB109_5
+; VI-NEXT: s_branch .LBB109_4
; VI-NEXT: .LBB109_3:
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr48
-; VI-NEXT: ; implicit-def: $sgpr49
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr37
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr39
-; VI-NEXT: ; implicit-def: $sgpr53
-; VI-NEXT: ; implicit-def: $sgpr54
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr50
-; VI-NEXT: ; implicit-def: $sgpr51
-; VI-NEXT: ; implicit-def: $sgpr52
-; VI-NEXT: ; implicit-def: $sgpr66
-; VI-NEXT: ; implicit-def: $sgpr67
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr55
-; VI-NEXT: ; implicit-def: $sgpr64
-; VI-NEXT: ; implicit-def: $sgpr65
-; VI-NEXT: s_branch .LBB109_2
-; VI-NEXT: .LBB109_4:
; VI-NEXT: v_mov_b32_e32 v1, s65
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v1, s64
@@ -88660,7 +86700,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s10
; VI-NEXT: v_mov_b32_e32 v30, s8
; VI-NEXT: v_mov_b32_e32 v31, s6
-; VI-NEXT: .LBB109_5: ; %end
+; VI-NEXT: .LBB109_4: ; %end
; VI-NEXT: s_mov_b32 s4, 0xc0c0004
; VI-NEXT: v_perm_b32 v3, v3, v31, s4
; VI-NEXT: v_perm_b32 v2, v2, v6, s4
@@ -88842,7 +86882,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB109_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s34, s5, 24
; GFX9-NEXT: s_lshr_b32 s55, s5, 16
@@ -88892,7 +86932,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; GFX9-NEXT: s_lshr_b64 s[10:11], s[20:21], 24
; GFX9-NEXT: s_lshr_b64 s[8:9], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[6:7], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB109_4
+; GFX9-NEXT: s_cbranch_execnz .LBB109_3
; GFX9-NEXT: .LBB109_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s6, s17, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v1, 0x40c00000
@@ -89257,58 +87297,8 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v4, 8, v4
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v3
; GFX9-NEXT: v_lshrrev_b32_e32 v3, 8, v3
-; GFX9-NEXT: s_branch .LBB109_5
+; GFX9-NEXT: s_branch .LBB109_4
; GFX9-NEXT: .LBB109_3:
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr48
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr49
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr50
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr51
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr52
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr53
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr35
-; GFX9-NEXT: ; implicit-def: $sgpr36
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr31
-; GFX9-NEXT: ; implicit-def: $sgpr54
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr38
-; GFX9-NEXT: ; implicit-def: $sgpr39
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr37
-; GFX9-NEXT: ; implicit-def: $sgpr55
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: s_branch .LBB109_2
-; GFX9-NEXT: .LBB109_4:
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v1, s48
@@ -89375,7 +87365,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v12, s10
; GFX9-NEXT: v_mov_b32_e32 v16, s8
; GFX9-NEXT: v_mov_b32_e32 v17, s6
-; GFX9-NEXT: .LBB109_5: ; %end
+; GFX9-NEXT: .LBB109_4: ; %end
; GFX9-NEXT: s_mov_b32 s4, 0xc0c0004
; GFX9-NEXT: v_perm_b32 v17, v26, v17, s4
; GFX9-NEXT: v_perm_b32 v3, v30, v3, s4
@@ -89516,7 +87506,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s39, 7
; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s48, 8
; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s49, 9
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB109_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s93, s27, 24
; GFX11-TRUE16-NEXT: s_lshr_b32 s49, s27, 16
@@ -89567,7 +87557,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-TRUE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB109_3
; GFX11-TRUE16-NEXT: .LBB109_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s1, 16
; GFX11-TRUE16-NEXT: s_and_b32 s1, s1, 0xffff0000
@@ -89927,58 +87917,8 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 8, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX11-TRUE16-NEXT: s_branch .LBB109_5
+; GFX11-TRUE16-NEXT: s_branch .LBB109_4
; GFX11-TRUE16-NEXT: .LBB109_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr34
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr35
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr36
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr37
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr79
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr38
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr91
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr39
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr94
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr95
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr28
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr92
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr48
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr89
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr30
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr31
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $vcc_hi
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr49
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr93
-; GFX11-TRUE16-NEXT: s_branch .LBB109_2
-; GFX11-TRUE16-NEXT: .LBB109_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, s26 :: v_dual_mov_b32 v36, s27
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s49 :: v_dual_mov_b32 v39, s24
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, s25 :: v_dual_mov_b32 v34, s22
@@ -90011,7 +87951,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s14 :: v_dual_mov_b32 v10, s12
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s10 :: v_dual_mov_b32 v12, s8
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s6 :: v_dual_mov_b32 v14, s4
-; GFX11-TRUE16-NEXT: .LBB109_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB109_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_perm_b32 v14, v96, v14, 0xc0c0004
; GFX11-TRUE16-NEXT: v_perm_b32 v1, v18, v1, 0xc0c0004
@@ -90117,7 +88057,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s39, 7
; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s48, 8
; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s49, 9
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB109_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s93, s27, 24
; GFX11-FAKE16-NEXT: s_lshr_b32 s49, s27, 16
@@ -90168,7 +88108,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-FAKE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB109_3
; GFX11-FAKE16-NEXT: .LBB109_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s1, 16
; GFX11-FAKE16-NEXT: s_and_b32 s1, s1, 0xffff0000
@@ -90531,58 +88471,8 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 8, v3
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 8, v2
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX11-FAKE16-NEXT: s_branch .LBB109_5
+; GFX11-FAKE16-NEXT: s_branch .LBB109_4
; GFX11-FAKE16-NEXT: .LBB109_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr34
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr35
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr36
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr37
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr79
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr38
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr91
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr39
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr94
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr95
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr28
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr92
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr48
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr89
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr30
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr31
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $vcc_hi
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr49
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr93
-; GFX11-FAKE16-NEXT: s_branch .LBB109_2
-; GFX11-FAKE16-NEXT: .LBB109_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v35, s26 :: v_dual_mov_b32 v34, s27
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s49 :: v_dual_mov_b32 v37, s24
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v31, s25 :: v_dual_mov_b32 v32, s22
@@ -90615,7 +88505,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, s14 :: v_dual_mov_b32 v8, s12
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s10 :: v_dual_mov_b32 v10, s8
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s6 :: v_dual_mov_b32 v12, s4
-; GFX11-FAKE16-NEXT: .LBB109_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB109_4: ; %end
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-FAKE16-NEXT: v_perm_b32 v12, v96, v12, 0xc0c0004
; GFX11-FAKE16-NEXT: v_perm_b32 v15, v15, v87, 0xc0c0004
@@ -93948,7 +91838,7 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v35
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB111_4
+; VI-NEXT: s_cbranch_scc0 .LBB111_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v11, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -94204,9 +92094,6 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; VI-NEXT: .LBB111_3: ; %end
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB111_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB111_2
;
; GFX9-LABEL: bitcast_v64i8_to_v32bf16_scalar:
; GFX9: ; %bb.0:
@@ -94265,7 +92152,7 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(19)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB111_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB111_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v0, s17
; GFX9-NEXT: v_mov_b32_e32 v8, 0xc0c0004
@@ -94532,9 +92419,6 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX9-NEXT: .LBB111_3: ; %end
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB111_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB111_2
;
; GFX11-TRUE16-LABEL: bitcast_v64i8_to_v32bf16_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -94591,7 +92475,7 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v54
; GFX11-TRUE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB111_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB111_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, 0xc0c0004 :: v_dual_lshlrev_b32 v13, 8, v34
@@ -94795,9 +92679,6 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX11-TRUE16-NEXT: .LBB111_3: ; %end
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB111_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB111_2
;
; GFX11-FAKE16-LABEL: bitcast_v64i8_to_v32bf16_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -94854,7 +92735,7 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v54
; GFX11-FAKE16-NEXT: s_and_b32 s76, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB111_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB111_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v8, 0xc0c0004
; GFX11-FAKE16-NEXT: s_and_b32 s76, s58, 0xff
@@ -95085,9 +92966,6 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX11-FAKE16-NEXT: .LBB111_3: ; %end
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB111_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB111_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll
index 498bd7fda4cec..a94a71bf2a81d 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll
@@ -158,7 +158,7 @@ define inreg <18 x float> @bitcast_v18i32_to_v18f32_scalar(<18 x i32> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s8, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s9, v0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -200,8 +200,6 @@ define inreg <18 x float> @bitcast_v18i32_to_v18f32_scalar(<18 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v16, s7
; SI-NEXT: v_mov_b32_e32 v17, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v18i32_to_v18f32_scalar:
; VI: ; %bb.0:
@@ -212,7 +210,7 @@ define inreg <18 x float> @bitcast_v18i32_to_v18f32_scalar(<18 x i32> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s8, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s9, v0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -254,8 +252,6 @@ define inreg <18 x float> @bitcast_v18i32_to_v18f32_scalar(<18 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v16, s7
; VI-NEXT: v_mov_b32_e32 v17, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v18i32_to_v18f32_scalar:
; GFX9: ; %bb.0:
@@ -266,7 +262,7 @@ define inreg <18 x float> @bitcast_v18i32_to_v18f32_scalar(<18 x i32> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s8, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s9, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -308,8 +304,6 @@ define inreg <18 x float> @bitcast_v18i32_to_v18f32_scalar(<18 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v16, s7
; GFX9-NEXT: v_mov_b32_e32 v17, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v18i32_to_v18f32_scalar:
; GFX11: ; %bb.0:
@@ -317,7 +311,7 @@ define inreg <18 x float> @bitcast_v18i32_to_v18f32_scalar(<18 x i32> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-NEXT: s_cmp_lg_u32 s4, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -352,8 +346,6 @@ define inreg <18 x float> @bitcast_v18i32_to_v18f32_scalar(<18 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -542,9 +534,9 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v17, s53, 1.0
; SI-NEXT: v_add_f32_e64 v16, s52, 1.0
@@ -564,10 +556,8 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB3_5
+; SI-NEXT: s_branch .LBB3_4
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -600,7 +590,7 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB3_5: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: v_readlane_b32 s53, v32, 9
; SI-NEXT: v_readlane_b32 s52, v32, 8
; SI-NEXT: v_readlane_b32 s51, v32, 7
@@ -653,9 +643,9 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v17, s53, 1.0
; VI-NEXT: v_add_f32_e64 v16, s52, 1.0
@@ -675,10 +665,8 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB3_5
+; VI-NEXT: s_branch .LBB3_4
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -711,7 +699,7 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB3_5: ; %end
+; VI-NEXT: .LBB3_4: ; %end
; VI-NEXT: v_readlane_b32 s53, v32, 9
; VI-NEXT: v_readlane_b32 s52, v32, 8
; VI-NEXT: v_readlane_b32 s51, v32, 7
@@ -764,9 +752,9 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v17, s53, 1.0
; GFX9-NEXT: v_add_f32_e64 v16, s52, 1.0
@@ -786,10 +774,8 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB3_5
+; GFX9-NEXT: s_branch .LBB3_4
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -822,7 +808,7 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB3_5: ; %end
+; GFX9-NEXT: .LBB3_4: ; %end
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
; GFX9-NEXT: v_readlane_b32 s52, v32, 8
; GFX9-NEXT: v_readlane_b32 s51, v32, 7
@@ -876,10 +862,10 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s52, s28
; GFX11-NEXT: v_writelane_b32 v32, s53, 9
; GFX11-NEXT: s_mov_b32 s53, s29
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v17, s53, 1.0
; GFX11-NEXT: v_add_f32_e64 v16, s52, 1.0
@@ -899,10 +885,8 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB3_5
+; GFX11-NEXT: s_branch .LBB3_4
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -919,7 +903,7 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB3_5: ; %end
+; GFX11-NEXT: .LBB3_4: ; %end
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
; GFX11-NEXT: v_readlane_b32 s52, v32, 8
; GFX11-NEXT: v_readlane_b32 s51, v32, 7
@@ -1104,7 +1088,7 @@ define inreg <9 x i64> @bitcast_v18i32_to_v9i64_scalar(<18 x i32> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s8, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s9, v0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB5_3
; SI-NEXT: .LBB5_2: ; %cmp.true
@@ -1146,8 +1130,6 @@ define inreg <9 x i64> @bitcast_v18i32_to_v9i64_scalar(<18 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v16, s7
; SI-NEXT: v_mov_b32_e32 v17, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v18i32_to_v9i64_scalar:
; VI: ; %bb.0:
@@ -1158,7 +1140,7 @@ define inreg <9 x i64> @bitcast_v18i32_to_v9i64_scalar(<18 x i32> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s8, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s9, v0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -1200,8 +1182,6 @@ define inreg <9 x i64> @bitcast_v18i32_to_v9i64_scalar(<18 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v16, s7
; VI-NEXT: v_mov_b32_e32 v17, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v18i32_to_v9i64_scalar:
; GFX9: ; %bb.0:
@@ -1212,7 +1192,7 @@ define inreg <9 x i64> @bitcast_v18i32_to_v9i64_scalar(<18 x i32> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s8, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s9, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -1254,8 +1234,6 @@ define inreg <9 x i64> @bitcast_v18i32_to_v9i64_scalar(<18 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v16, s7
; GFX9-NEXT: v_mov_b32_e32 v17, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v18i32_to_v9i64_scalar:
; GFX11: ; %bb.0:
@@ -1263,7 +1241,7 @@ define inreg <9 x i64> @bitcast_v18i32_to_v9i64_scalar(<18 x i32> inreg %a, i32
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-NEXT: s_cmp_lg_u32 s4, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -1298,8 +1276,6 @@ define inreg <9 x i64> @bitcast_v18i32_to_v9i64_scalar(<18 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1474,7 +1450,7 @@ define inreg <18 x i32> @bitcast_v9i64_to_v18i32_scalar(<9 x i64> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s8, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s9, v0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB7_3
; SI-NEXT: .LBB7_2: ; %cmp.true
@@ -1516,8 +1492,6 @@ define inreg <18 x i32> @bitcast_v9i64_to_v18i32_scalar(<9 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v16, s7
; SI-NEXT: v_mov_b32_e32 v17, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v9i64_to_v18i32_scalar:
; VI: ; %bb.0:
@@ -1528,7 +1502,7 @@ define inreg <18 x i32> @bitcast_v9i64_to_v18i32_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s8, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s9, v0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -1570,8 +1544,6 @@ define inreg <18 x i32> @bitcast_v9i64_to_v18i32_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v16, s7
; VI-NEXT: v_mov_b32_e32 v17, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v9i64_to_v18i32_scalar:
; GFX9: ; %bb.0:
@@ -1582,7 +1554,7 @@ define inreg <18 x i32> @bitcast_v9i64_to_v18i32_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s8, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s9, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
@@ -1624,8 +1596,6 @@ define inreg <18 x i32> @bitcast_v9i64_to_v18i32_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v16, s7
; GFX9-NEXT: v_mov_b32_e32 v17, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v9i64_to_v18i32_scalar:
; GFX11: ; %bb.0:
@@ -1633,7 +1603,7 @@ define inreg <18 x i32> @bitcast_v9i64_to_v18i32_scalar(<9 x i64> inreg %a, i32
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-NEXT: s_cmp_lg_u32 s4, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
@@ -1668,8 +1638,6 @@ define inreg <18 x i32> @bitcast_v9i64_to_v18i32_scalar(<9 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1839,7 +1807,7 @@ define inreg <9 x double> @bitcast_v18i32_to_v9f64_scalar(<18 x i32> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s8, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s9, v0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB9_3
; SI-NEXT: .LBB9_2: ; %cmp.true
@@ -1881,8 +1849,6 @@ define inreg <9 x double> @bitcast_v18i32_to_v9f64_scalar(<18 x i32> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v16, s7
; SI-NEXT: v_mov_b32_e32 v17, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v18i32_to_v9f64_scalar:
; VI: ; %bb.0:
@@ -1893,7 +1859,7 @@ define inreg <9 x double> @bitcast_v18i32_to_v9f64_scalar(<18 x i32> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s8, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s9, v0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -1935,8 +1901,6 @@ define inreg <9 x double> @bitcast_v18i32_to_v9f64_scalar(<18 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v16, s7
; VI-NEXT: v_mov_b32_e32 v17, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v18i32_to_v9f64_scalar:
; GFX9: ; %bb.0:
@@ -1947,7 +1911,7 @@ define inreg <9 x double> @bitcast_v18i32_to_v9f64_scalar(<18 x i32> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s8, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s9, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -1989,8 +1953,6 @@ define inreg <9 x double> @bitcast_v18i32_to_v9f64_scalar(<18 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v16, s7
; GFX9-NEXT: v_mov_b32_e32 v17, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v18i32_to_v9f64_scalar:
; GFX11: ; %bb.0:
@@ -1998,7 +1960,7 @@ define inreg <9 x double> @bitcast_v18i32_to_v9f64_scalar(<18 x i32> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-NEXT: s_cmp_lg_u32 s4, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -2033,8 +1995,6 @@ define inreg <9 x double> @bitcast_v18i32_to_v9f64_scalar(<18 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2196,9 +2156,9 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; SI-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
@@ -2209,10 +2169,8 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; SI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; SI-NEXT: s_branch .LBB11_5
+; SI-NEXT: s_branch .LBB11_4
; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -2245,7 +2203,7 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB11_5: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: v_readlane_b32 s53, v32, 9
; SI-NEXT: v_readlane_b32 s52, v32, 8
; SI-NEXT: v_readlane_b32 s51, v32, 7
@@ -2298,9 +2256,9 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; VI-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
@@ -2311,10 +2269,8 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; VI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB11_5
+; VI-NEXT: s_branch .LBB11_4
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -2347,7 +2303,7 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB11_5: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: v_readlane_b32 s53, v32, 9
; VI-NEXT: v_readlane_b32 s52, v32, 8
; VI-NEXT: v_readlane_b32 s51, v32, 7
@@ -2400,9 +2356,9 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; GFX9-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
@@ -2413,10 +2369,8 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; GFX9-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB11_5
+; GFX9-NEXT: s_branch .LBB11_4
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -2449,7 +2403,7 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB11_5: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
; GFX9-NEXT: v_readlane_b32 s52, v32, 8
; GFX9-NEXT: v_readlane_b32 s51, v32, 7
@@ -2503,10 +2457,10 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s52, s28
; GFX11-NEXT: v_writelane_b32 v32, s53, 9
; GFX11-NEXT: s_mov_b32 s53, s29
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; GFX11-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
@@ -2517,10 +2471,8 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; GFX11-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB11_5
+; GFX11-NEXT: s_branch .LBB11_4
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -2537,7 +2489,7 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB11_5: ; %end
+; GFX11-NEXT: .LBB11_4: ; %end
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
; GFX11-NEXT: v_readlane_b32 s52, v32, 8
; GFX11-NEXT: v_readlane_b32 s51, v32, 7
@@ -3110,7 +3062,7 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s7, v1
; SI-NEXT: s_cmp_lg_u32 s6, 0
; SI-NEXT: v_readfirstlane_b32 s6, v0
-; SI-NEXT: s_cbranch_scc0 .LBB13_4
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s60, s5, 16
; SI-NEXT: s_lshr_b32 s61, s7, 16
@@ -3242,26 +3194,6 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v16, s4
; SI-NEXT: v_mov_b32_e32 v17, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB13_4:
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: s_branch .LBB13_2
;
; VI-LABEL: bitcast_v18i32_to_v36i16_scalar:
; VI: ; %bb.0:
@@ -3272,7 +3204,7 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s8, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s9, v0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s10, s6, 16
; VI-NEXT: s_lshr_b32 s11, s7, 16
@@ -3404,26 +3336,6 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v16, s7
; VI-NEXT: v_mov_b32_e32 v17, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v18i32_to_v36i16_scalar:
; GFX9: ; %bb.0:
@@ -3434,7 +3346,7 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s8, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s9, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s10, s6, 16
; GFX9-NEXT: s_lshr_b32 s11, s7, 16
@@ -3530,26 +3442,6 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v16, s7
; GFX9-NEXT: v_mov_b32_e32 v17, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v18i32_to_v36i16_scalar:
; GFX11: ; %bb.0:
@@ -3557,7 +3449,7 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-NEXT: s_mov_b32 s46, 0
; GFX11-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s4, s29, 16
; GFX11-NEXT: s_lshr_b32 s5, s28, 16
@@ -3646,26 +3538,6 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v14, s7 :: v_dual_mov_b32 v15, s6
; GFX11-NEXT: v_dual_mov_b32 v16, s5 :: v_dual_mov_b32 v17, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4472,7 +4344,7 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; SI-NEXT: v_writelane_b32 v18, s66, 14
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v18, s67, 15
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s91, 16
@@ -4660,9 +4532,6 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v36i16_to_v18i32_scalar:
; VI: ; %bb.0:
@@ -4710,7 +4579,7 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; VI-NEXT: v_writelane_b32 v18, s66, 14
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_writelane_b32 v18, s67, 15
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s91, 16
@@ -4898,9 +4767,6 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v36i16_to_v18i32_scalar:
; GFX9: ; %bb.0:
@@ -4960,9 +4826,9 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s51, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s52, s58, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s53, s56, s57
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -4982,10 +4848,8 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v15, s51, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v16, s52, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v17, s53, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB15_5
+; GFX9-NEXT: s_branch .LBB15_4
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -5018,7 +4882,7 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB15_5: ; %end
+; GFX9-NEXT: .LBB15_4: ; %end
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
; GFX9-NEXT: v_readlane_b32 s52, v32, 8
; GFX9-NEXT: v_readlane_b32 s51, v32, 7
@@ -5077,10 +4941,10 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s27, s15
; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -5102,8 +4966,6 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -5678,7 +5540,7 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s7, v1
; SI-NEXT: s_cmp_lg_u32 s6, 0
; SI-NEXT: v_readfirstlane_b32 s6, v0
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s60, s5, 16
; SI-NEXT: s_lshr_b32 s61, s7, 16
@@ -5810,26 +5672,6 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v16, s4
; SI-NEXT: v_mov_b32_e32 v17, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_v18i32_to_v36f16_scalar:
; VI: ; %bb.0:
@@ -5840,7 +5682,7 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s8, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s9, v0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s10, s6, 16
; VI-NEXT: s_lshr_b32 s11, s7, 16
@@ -5972,26 +5814,6 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v16, s7
; VI-NEXT: v_mov_b32_e32 v17, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v18i32_to_v36f16_scalar:
; GFX9: ; %bb.0:
@@ -6002,7 +5824,7 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s8, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s9, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s10, s6, 16
; GFX9-NEXT: s_lshr_b32 s11, s7, 16
@@ -6098,26 +5920,6 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v16, s7
; GFX9-NEXT: v_mov_b32_e32 v17, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v18i32_to_v36f16_scalar:
; GFX11: ; %bb.0:
@@ -6125,7 +5927,7 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-NEXT: s_mov_b32 s46, 0
; GFX11-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s4, s29, 16
; GFX11-NEXT: s_lshr_b32 s5, s28, 16
@@ -6214,26 +6016,6 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v14, s7 :: v_dual_mov_b32 v15, s6
; GFX11-NEXT: v_dual_mov_b32 v16, s5 :: v_dual_mov_b32 v17, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7114,7 +6896,7 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; SI-NEXT: v_writelane_b32 v32, s66, 14
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v32, s67, 15
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s91, 16
@@ -7170,7 +6952,7 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s53, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s91
; SI-NEXT: v_cvt_f32_f16_e32 v2, s90
@@ -7316,11 +7098,8 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_or_b32_e32 v17, v18, v17
-; SI-NEXT: s_branch .LBB19_5
+; SI-NEXT: s_branch .LBB19_4
; SI-NEXT: .LBB19_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -7353,7 +7132,7 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB19_5: ; %end
+; SI-NEXT: .LBB19_4: ; %end
; SI-NEXT: v_readlane_b32 s67, v32, 15
; SI-NEXT: v_readlane_b32 s66, v32, 14
; SI-NEXT: v_readlane_b32 s65, v32, 13
@@ -7422,7 +7201,7 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; VI-NEXT: v_writelane_b32 v32, s66, 14
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_writelane_b32 v32, s67, 15
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s91, 16
@@ -7478,7 +7257,7 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s53, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB19_4
+; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v17, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s91
@@ -7553,11 +7332,8 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; VI-NEXT: v_add_f16_sdwa v18, v18, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v17, s6, v17
; VI-NEXT: v_or_b32_e32 v17, v17, v18
-; VI-NEXT: s_branch .LBB19_5
+; VI-NEXT: s_branch .LBB19_4
; VI-NEXT: .LBB19_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB19_2
-; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -7590,7 +7366,7 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB19_5: ; %end
+; VI-NEXT: .LBB19_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v32, 15
; VI-NEXT: v_readlane_b32 s66, v32, 14
; VI-NEXT: v_readlane_b32 s65, v32, 13
@@ -7671,9 +7447,9 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s51, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s52, s58, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s53, s56, s57
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v17, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v17 op_sel_hi:[1,0]
@@ -7694,10 +7470,8 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v15, s51, v17 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v16, s52, v17 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v17, s53, v17 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB19_5
+; GFX9-NEXT: s_branch .LBB19_4
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -7730,7 +7504,7 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB19_5: ; %end
+; GFX9-NEXT: .LBB19_4: ; %end
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
; GFX9-NEXT: v_readlane_b32 s52, v32, 8
; GFX9-NEXT: v_readlane_b32 s51, v32, 7
@@ -7789,10 +7563,10 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s27, s15
; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -7814,8 +7588,6 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -8021,9 +7793,9 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB21_3
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_4
+; SI-NEXT: s_cbranch_execnz .LBB21_3
; SI-NEXT: .LBB21_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v17, s53, 1.0
; SI-NEXT: v_add_f32_e64 v16, s52, 1.0
@@ -8043,10 +7815,8 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB21_5
+; SI-NEXT: s_branch .LBB21_4
; SI-NEXT: .LBB21_3:
-; SI-NEXT: s_branch .LBB21_2
-; SI-NEXT: .LBB21_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -8079,7 +7849,7 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB21_5: ; %end
+; SI-NEXT: .LBB21_4: ; %end
; SI-NEXT: v_readlane_b32 s53, v32, 9
; SI-NEXT: v_readlane_b32 s52, v32, 8
; SI-NEXT: v_readlane_b32 s51, v32, 7
@@ -8132,9 +7902,9 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB21_3
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_4
+; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v17, s53, 1.0
; VI-NEXT: v_add_f32_e64 v16, s52, 1.0
@@ -8154,10 +7924,8 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB21_5
+; VI-NEXT: s_branch .LBB21_4
; VI-NEXT: .LBB21_3:
-; VI-NEXT: s_branch .LBB21_2
-; VI-NEXT: .LBB21_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -8190,7 +7958,7 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB21_5: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_readlane_b32 s53, v32, 9
; VI-NEXT: v_readlane_b32 s52, v32, 8
; VI-NEXT: v_readlane_b32 s51, v32, 7
@@ -8243,9 +8011,9 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
+; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v17, s53, 1.0
; GFX9-NEXT: v_add_f32_e64 v16, s52, 1.0
@@ -8265,10 +8033,8 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB21_5
+; GFX9-NEXT: s_branch .LBB21_4
; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
-; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -8301,7 +8067,7 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB21_5: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
; GFX9-NEXT: v_readlane_b32 s52, v32, 8
; GFX9-NEXT: v_readlane_b32 s51, v32, 7
@@ -8355,10 +8121,10 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s52, s28
; GFX11-NEXT: v_writelane_b32 v32, s53, 9
; GFX11-NEXT: s_mov_b32 s53, s29
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
; GFX11-NEXT: .LBB21_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v17, s53, 1.0
; GFX11-NEXT: v_add_f32_e64 v16, s52, 1.0
@@ -8378,10 +8144,8 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB21_5
+; GFX11-NEXT: s_branch .LBB21_4
; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
-; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -8398,7 +8162,7 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB21_5: ; %end
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
; GFX11-NEXT: v_readlane_b32 s52, v32, 8
; GFX11-NEXT: v_readlane_b32 s51, v32, 7
@@ -8588,7 +8352,7 @@ define inreg <18 x float> @bitcast_v9i64_to_v18f32_scalar(<9 x i64> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s8, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s9, v0
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB23_3
; SI-NEXT: .LBB23_2: ; %cmp.true
@@ -8630,8 +8394,6 @@ define inreg <18 x float> @bitcast_v9i64_to_v18f32_scalar(<9 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v16, s7
; SI-NEXT: v_mov_b32_e32 v17, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v9i64_to_v18f32_scalar:
; VI: ; %bb.0:
@@ -8642,7 +8404,7 @@ define inreg <18 x float> @bitcast_v9i64_to_v18f32_scalar(<9 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s8, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s9, v0
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
@@ -8684,8 +8446,6 @@ define inreg <18 x float> @bitcast_v9i64_to_v18f32_scalar(<9 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v16, s7
; VI-NEXT: v_mov_b32_e32 v17, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v9i64_to_v18f32_scalar:
; GFX9: ; %bb.0:
@@ -8696,7 +8456,7 @@ define inreg <18 x float> @bitcast_v9i64_to_v18f32_scalar(<9 x i64> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s8, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s9, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
@@ -8738,8 +8498,6 @@ define inreg <18 x float> @bitcast_v9i64_to_v18f32_scalar(<9 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v16, s7
; GFX9-NEXT: v_mov_b32_e32 v17, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v9i64_to_v18f32_scalar:
; GFX11: ; %bb.0:
@@ -8747,7 +8505,7 @@ define inreg <18 x float> @bitcast_v9i64_to_v18f32_scalar(<9 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-NEXT: s_cmp_lg_u32 s4, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
@@ -8782,8 +8540,6 @@ define inreg <18 x float> @bitcast_v9i64_to_v18f32_scalar(<9 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: s_branch .LBB23_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8972,9 +8728,9 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB25_3
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_4
+; SI-NEXT: s_cbranch_execnz .LBB25_3
; SI-NEXT: .LBB25_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v17, s53, 1.0
; SI-NEXT: v_add_f32_e64 v16, s52, 1.0
@@ -8994,10 +8750,8 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB25_5
+; SI-NEXT: s_branch .LBB25_4
; SI-NEXT: .LBB25_3:
-; SI-NEXT: s_branch .LBB25_2
-; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -9030,7 +8784,7 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB25_5: ; %end
+; SI-NEXT: .LBB25_4: ; %end
; SI-NEXT: v_readlane_b32 s53, v32, 9
; SI-NEXT: v_readlane_b32 s52, v32, 8
; SI-NEXT: v_readlane_b32 s51, v32, 7
@@ -9083,9 +8837,9 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
+; VI-NEXT: s_cbranch_execnz .LBB25_3
; VI-NEXT: .LBB25_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v17, s53, 1.0
; VI-NEXT: v_add_f32_e64 v16, s52, 1.0
@@ -9105,10 +8859,8 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB25_5
+; VI-NEXT: s_branch .LBB25_4
; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
-; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -9141,7 +8893,7 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB25_5: ; %end
+; VI-NEXT: .LBB25_4: ; %end
; VI-NEXT: v_readlane_b32 s53, v32, 9
; VI-NEXT: v_readlane_b32 s52, v32, 8
; VI-NEXT: v_readlane_b32 s51, v32, 7
@@ -9194,9 +8946,9 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
+; GFX9-NEXT: s_cbranch_execnz .LBB25_3
; GFX9-NEXT: .LBB25_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v17, s53, 1.0
; GFX9-NEXT: v_add_f32_e64 v16, s52, 1.0
@@ -9216,10 +8968,8 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB25_5
+; GFX9-NEXT: s_branch .LBB25_4
; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
-; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -9252,7 +9002,7 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB25_5: ; %end
+; GFX9-NEXT: .LBB25_4: ; %end
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
; GFX9-NEXT: v_readlane_b32 s52, v32, 8
; GFX9-NEXT: v_readlane_b32 s51, v32, 7
@@ -9306,10 +9056,10 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s52, s28
; GFX11-NEXT: v_writelane_b32 v32, s53, 9
; GFX11-NEXT: s_mov_b32 s53, s29
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_3
; GFX11-NEXT: .LBB25_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v17, s53, 1.0
; GFX11-NEXT: v_add_f32_e64 v16, s52, 1.0
@@ -9329,10 +9079,8 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB25_5
+; GFX11-NEXT: s_branch .LBB25_4
; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
-; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -9349,7 +9097,7 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB25_5: ; %end
+; GFX11-NEXT: .LBB25_4: ; %end
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
; GFX11-NEXT: v_readlane_b32 s52, v32, 8
; GFX11-NEXT: v_readlane_b32 s51, v32, 7
@@ -9526,9 +9274,9 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB27_3
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_4
+; SI-NEXT: s_cbranch_execnz .LBB27_3
; SI-NEXT: .LBB27_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; SI-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
@@ -9539,10 +9287,8 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; SI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; SI-NEXT: s_branch .LBB27_5
+; SI-NEXT: s_branch .LBB27_4
; SI-NEXT: .LBB27_3:
-; SI-NEXT: s_branch .LBB27_2
-; SI-NEXT: .LBB27_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -9575,7 +9321,7 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB27_5: ; %end
+; SI-NEXT: .LBB27_4: ; %end
; SI-NEXT: v_readlane_b32 s53, v32, 9
; SI-NEXT: v_readlane_b32 s52, v32, 8
; SI-NEXT: v_readlane_b32 s51, v32, 7
@@ -9628,9 +9374,9 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB27_3
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_4
+; VI-NEXT: s_cbranch_execnz .LBB27_3
; VI-NEXT: .LBB27_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; VI-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
@@ -9641,10 +9387,8 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB27_5
+; VI-NEXT: s_branch .LBB27_4
; VI-NEXT: .LBB27_3:
-; VI-NEXT: s_branch .LBB27_2
-; VI-NEXT: .LBB27_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -9677,7 +9421,7 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB27_5: ; %end
+; VI-NEXT: .LBB27_4: ; %end
; VI-NEXT: v_readlane_b32 s53, v32, 9
; VI-NEXT: v_readlane_b32 s52, v32, 8
; VI-NEXT: v_readlane_b32 s51, v32, 7
@@ -9730,9 +9474,9 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_4
+; GFX9-NEXT: s_cbranch_execnz .LBB27_3
; GFX9-NEXT: .LBB27_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; GFX9-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
@@ -9743,10 +9487,8 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB27_5
+; GFX9-NEXT: s_branch .LBB27_4
; GFX9-NEXT: .LBB27_3:
-; GFX9-NEXT: s_branch .LBB27_2
-; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -9779,7 +9521,7 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB27_5: ; %end
+; GFX9-NEXT: .LBB27_4: ; %end
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
; GFX9-NEXT: v_readlane_b32 s52, v32, 8
; GFX9-NEXT: v_readlane_b32 s51, v32, 7
@@ -9833,10 +9575,10 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s52, s28
; GFX11-NEXT: v_writelane_b32 v32, s53, 9
; GFX11-NEXT: s_mov_b32 s53, s29
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_3
; GFX11-NEXT: .LBB27_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; GFX11-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
@@ -9847,10 +9589,8 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB27_5
+; GFX11-NEXT: s_branch .LBB27_4
; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
-; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -9867,7 +9607,7 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB27_5: ; %end
+; GFX11-NEXT: .LBB27_4: ; %end
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
; GFX11-NEXT: v_readlane_b32 s52, v32, 8
; GFX11-NEXT: v_readlane_b32 s51, v32, 7
@@ -10422,7 +10162,7 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s7, v1
; SI-NEXT: s_cmp_lg_u32 s6, 0
; SI-NEXT: v_readfirstlane_b32 s6, v0
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s76, s5, 16
; SI-NEXT: s_lshr_b32 s75, s7, 16
@@ -10442,7 +10182,7 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[44:45], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB29_4
+; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v17, s5, 1.0
; SI-NEXT: v_add_f32_e64 v16, s4, 1.0
@@ -10480,28 +10220,8 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v1
-; SI-NEXT: s_branch .LBB29_5
+; SI-NEXT: s_branch .LBB29_4
; SI-NEXT: .LBB29_3:
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: s_branch .LBB29_2
-; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -10538,7 +10258,7 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v24, s44
; SI-NEXT: v_mov_b32_e32 v25, s46
; SI-NEXT: v_mov_b32_e32 v26, s56
-; SI-NEXT: .LBB29_5: ; %end
+; SI-NEXT: .LBB29_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
@@ -10604,7 +10324,7 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s8, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s9, v0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s10, s6, 16
; VI-NEXT: s_lshr_b32 s11, s7, 16
@@ -10624,7 +10344,7 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; VI-NEXT: s_lshr_b32 s57, s18, 16
; VI-NEXT: s_lshr_b32 s58, s17, 16
; VI-NEXT: s_lshr_b32 s59, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB29_4
+; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v17, s6, 1.0
; VI-NEXT: v_add_f32_e64 v16, s7, 1.0
@@ -10662,28 +10382,8 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; VI-NEXT: s_branch .LBB29_5
+; VI-NEXT: s_branch .LBB29_4
; VI-NEXT: .LBB29_3:
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB29_2
-; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -10720,7 +10420,7 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v20, s12
; VI-NEXT: v_mov_b32_e32 v19, s11
; VI-NEXT: v_mov_b32_e32 v18, s10
-; VI-NEXT: .LBB29_5: ; %end
+; VI-NEXT: .LBB29_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v35, 16, v35
; VI-NEXT: v_lshlrev_b32_e32 v34, 16, v34
; VI-NEXT: v_lshlrev_b32_e32 v33, 16, v33
@@ -10768,7 +10468,7 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s8, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s9, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s10, s6, 16
; GFX9-NEXT: s_lshr_b32 s11, s7, 16
@@ -10788,7 +10488,7 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX9-NEXT: s_lshr_b32 s57, s18, 16
; GFX9-NEXT: s_lshr_b32 s58, s17, 16
; GFX9-NEXT: s_lshr_b32 s59, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
+; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v17, s6, 1.0
; GFX9-NEXT: v_add_f32_e64 v16, s7, 1.0
@@ -10826,28 +10526,8 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX9-NEXT: s_branch .LBB29_5
+; GFX9-NEXT: s_branch .LBB29_4
; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB29_2
-; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -10884,7 +10564,7 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v20, s12
; GFX9-NEXT: v_mov_b32_e32 v19, s11
; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: .LBB29_5: ; %end
+; GFX9-NEXT: .LBB29_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -10929,7 +10609,7 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s4, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s28, 16
@@ -10950,7 +10630,7 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, s29, 1.0
; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, s28, 1.0
@@ -10988,28 +10668,8 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB29_5
+; GFX11-TRUE16-NEXT: s_branch .LBB29_4
; GFX11-TRUE16-NEXT: .LBB29_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr5
-; GFX11-TRUE16-NEXT: s_branch .LBB29_2
-; GFX11-TRUE16-NEXT: .LBB29_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -11028,7 +10688,7 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, s6 :: v_dual_mov_b32 v18, s5
-; GFX11-TRUE16-NEXT: .LBB29_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB29_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
@@ -11065,7 +10725,7 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s28, 16
@@ -11086,7 +10746,7 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, s29, 1.0
; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, s28, 1.0
@@ -11124,28 +10784,8 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-FAKE16-NEXT: s_branch .LBB29_5
+; GFX11-FAKE16-NEXT: s_branch .LBB29_4
; GFX11-FAKE16-NEXT: .LBB29_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr5
-; GFX11-FAKE16-NEXT: s_branch .LBB29_2
-; GFX11-FAKE16-NEXT: .LBB29_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v7, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s16 :: v_dual_mov_b32 v5, s17
@@ -11164,7 +10804,7 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, s6 :: v_dual_mov_b32 v18, s5
-; GFX11-FAKE16-NEXT: .LBB29_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB29_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v0
@@ -12008,7 +11648,7 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; SI-NEXT: v_writelane_b32 v18, s66, 14
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v18, s67, 15
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s91, 16
@@ -12196,9 +11836,6 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v36i16_to_v18f32_scalar:
; VI: ; %bb.0:
@@ -12246,7 +11883,7 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; VI-NEXT: v_writelane_b32 v18, s66, 14
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_writelane_b32 v18, s67, 15
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s91, 16
@@ -12434,9 +12071,6 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v36i16_to_v18f32_scalar:
; GFX9: ; %bb.0:
@@ -12496,9 +12130,9 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s51, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s52, s58, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s53, s56, s57
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
+; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -12518,10 +12152,8 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v15, s51, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v16, s52, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v17, s53, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB31_5
+; GFX9-NEXT: s_branch .LBB31_4
; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
-; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -12554,7 +12186,7 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB31_5: ; %end
+; GFX9-NEXT: .LBB31_4: ; %end
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
; GFX9-NEXT: v_readlane_b32 s52, v32, 8
; GFX9-NEXT: v_readlane_b32 s51, v32, 7
@@ -12613,10 +12245,10 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s27, s15
; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-NEXT: .LBB31_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -12638,8 +12270,6 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
-; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -13196,7 +12826,7 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s7, v1
; SI-NEXT: s_cmp_lg_u32 s6, 0
; SI-NEXT: v_readfirstlane_b32 s6, v0
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s76, s5, 16
; SI-NEXT: s_lshr_b32 s75, s7, 16
@@ -13216,7 +12846,7 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[44:45], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB33_4
+; SI-NEXT: s_cbranch_execnz .LBB33_3
; SI-NEXT: .LBB33_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v17, s5, 1.0
; SI-NEXT: v_add_f32_e64 v16, s4, 1.0
@@ -13254,28 +12884,8 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v1
-; SI-NEXT: s_branch .LBB33_5
+; SI-NEXT: s_branch .LBB33_4
; SI-NEXT: .LBB33_3:
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: s_branch .LBB33_2
-; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -13312,7 +12922,7 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v24, s44
; SI-NEXT: v_mov_b32_e32 v25, s46
; SI-NEXT: v_mov_b32_e32 v26, s56
-; SI-NEXT: .LBB33_5: ; %end
+; SI-NEXT: .LBB33_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
@@ -13378,7 +12988,7 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s8, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s9, v0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s10, s6, 16
; VI-NEXT: s_lshr_b32 s11, s7, 16
@@ -13398,7 +13008,7 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; VI-NEXT: s_lshr_b32 s57, s18, 16
; VI-NEXT: s_lshr_b32 s58, s17, 16
; VI-NEXT: s_lshr_b32 s59, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB33_4
+; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v17, s6, 1.0
; VI-NEXT: v_add_f32_e64 v16, s7, 1.0
@@ -13436,28 +13046,8 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; VI-NEXT: s_branch .LBB33_5
+; VI-NEXT: s_branch .LBB33_4
; VI-NEXT: .LBB33_3:
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB33_2
-; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -13494,7 +13084,7 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v20, s12
; VI-NEXT: v_mov_b32_e32 v19, s11
; VI-NEXT: v_mov_b32_e32 v18, s10
-; VI-NEXT: .LBB33_5: ; %end
+; VI-NEXT: .LBB33_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v35, 16, v35
; VI-NEXT: v_lshlrev_b32_e32 v34, 16, v34
; VI-NEXT: v_lshlrev_b32_e32 v33, 16, v33
@@ -13542,7 +13132,7 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s8, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s9, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s10, s6, 16
; GFX9-NEXT: s_lshr_b32 s11, s7, 16
@@ -13562,7 +13152,7 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX9-NEXT: s_lshr_b32 s57, s18, 16
; GFX9-NEXT: s_lshr_b32 s58, s17, 16
; GFX9-NEXT: s_lshr_b32 s59, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
+; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v17, s6, 1.0
; GFX9-NEXT: v_add_f32_e64 v16, s7, 1.0
@@ -13600,28 +13190,8 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX9-NEXT: s_branch .LBB33_5
+; GFX9-NEXT: s_branch .LBB33_4
; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB33_2
-; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -13658,7 +13228,7 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v20, s12
; GFX9-NEXT: v_mov_b32_e32 v19, s11
; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: .LBB33_5: ; %end
+; GFX9-NEXT: .LBB33_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -13703,7 +13273,7 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s4, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s28, 16
@@ -13724,7 +13294,7 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, s29, 1.0
; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, s28, 1.0
@@ -13762,28 +13332,8 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB33_5
+; GFX11-TRUE16-NEXT: s_branch .LBB33_4
; GFX11-TRUE16-NEXT: .LBB33_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr5
-; GFX11-TRUE16-NEXT: s_branch .LBB33_2
-; GFX11-TRUE16-NEXT: .LBB33_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -13802,7 +13352,7 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, s6 :: v_dual_mov_b32 v18, s5
-; GFX11-TRUE16-NEXT: .LBB33_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB33_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
@@ -13839,7 +13389,7 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s28, 16
@@ -13860,7 +13410,7 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, s29, 1.0
; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, s28, 1.0
@@ -13898,28 +13448,8 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-FAKE16-NEXT: s_branch .LBB33_5
+; GFX11-FAKE16-NEXT: s_branch .LBB33_4
; GFX11-FAKE16-NEXT: .LBB33_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr5
-; GFX11-FAKE16-NEXT: s_branch .LBB33_2
-; GFX11-FAKE16-NEXT: .LBB33_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v7, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s16 :: v_dual_mov_b32 v5, s17
@@ -13938,7 +13468,7 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, s6 :: v_dual_mov_b32 v18, s5
-; GFX11-FAKE16-NEXT: .LBB33_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB33_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v0
@@ -14856,7 +14386,7 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; SI-NEXT: v_writelane_b32 v32, s66, 14
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v32, s67, 15
-; SI-NEXT: s_cbranch_scc0 .LBB35_3
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s91, 16
@@ -14912,7 +14442,7 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s53, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB35_4
+; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s91
; SI-NEXT: v_cvt_f32_f16_e32 v2, s90
@@ -15058,11 +14588,8 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_or_b32_e32 v17, v18, v17
-; SI-NEXT: s_branch .LBB35_5
+; SI-NEXT: s_branch .LBB35_4
; SI-NEXT: .LBB35_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB35_2
-; SI-NEXT: .LBB35_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -15095,7 +14622,7 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB35_5: ; %end
+; SI-NEXT: .LBB35_4: ; %end
; SI-NEXT: v_readlane_b32 s67, v32, 15
; SI-NEXT: v_readlane_b32 s66, v32, 14
; SI-NEXT: v_readlane_b32 s65, v32, 13
@@ -15164,7 +14691,7 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; VI-NEXT: v_writelane_b32 v32, s66, 14
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_writelane_b32 v32, s67, 15
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s91, 16
@@ -15220,7 +14747,7 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s53, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB35_4
+; VI-NEXT: s_cbranch_execnz .LBB35_3
; VI-NEXT: .LBB35_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v17, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s91
@@ -15295,11 +14822,8 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; VI-NEXT: v_add_f16_sdwa v18, v18, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v17, s6, v17
; VI-NEXT: v_or_b32_e32 v17, v17, v18
-; VI-NEXT: s_branch .LBB35_5
+; VI-NEXT: s_branch .LBB35_4
; VI-NEXT: .LBB35_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB35_2
-; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -15332,7 +14856,7 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB35_5: ; %end
+; VI-NEXT: .LBB35_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v32, 15
; VI-NEXT: v_readlane_b32 s66, v32, 14
; VI-NEXT: v_readlane_b32 s65, v32, 13
@@ -15413,9 +14937,9 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s51, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s52, s58, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s53, s56, s57
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
+; GFX9-NEXT: s_cbranch_execnz .LBB35_3
; GFX9-NEXT: .LBB35_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v17, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v17 op_sel_hi:[1,0]
@@ -15436,10 +14960,8 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v15, s51, v17 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v16, s52, v17 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v17, s53, v17 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB35_5
+; GFX9-NEXT: s_branch .LBB35_4
; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
-; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -15472,7 +14994,7 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB35_5: ; %end
+; GFX9-NEXT: .LBB35_4: ; %end
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
; GFX9-NEXT: v_readlane_b32 s52, v32, 8
; GFX9-NEXT: v_readlane_b32 s51, v32, 7
@@ -15531,10 +15053,10 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s27, s15
; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-NEXT: .LBB35_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -15556,8 +15078,6 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
-; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -15749,7 +15269,7 @@ define inreg <9 x double> @bitcast_v9i64_to_v9f64_scalar(<9 x i64> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s8, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s9, v0
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB37_3
; SI-NEXT: .LBB37_2: ; %cmp.true
@@ -15791,8 +15311,6 @@ define inreg <9 x double> @bitcast_v9i64_to_v9f64_scalar(<9 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v16, s7
; SI-NEXT: v_mov_b32_e32 v17, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v9i64_to_v9f64_scalar:
; VI: ; %bb.0:
@@ -15803,7 +15321,7 @@ define inreg <9 x double> @bitcast_v9i64_to_v9f64_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s8, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s9, v0
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB37_3
; VI-NEXT: .LBB37_2: ; %cmp.true
@@ -15845,8 +15363,6 @@ define inreg <9 x double> @bitcast_v9i64_to_v9f64_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v16, s7
; VI-NEXT: v_mov_b32_e32 v17, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v9i64_to_v9f64_scalar:
; GFX9: ; %bb.0:
@@ -15857,7 +15373,7 @@ define inreg <9 x double> @bitcast_v9i64_to_v9f64_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s8, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s9, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB37_3
; GFX9-NEXT: .LBB37_2: ; %cmp.true
@@ -15899,8 +15415,6 @@ define inreg <9 x double> @bitcast_v9i64_to_v9f64_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v16, s7
; GFX9-NEXT: v_mov_b32_e32 v17, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v9i64_to_v9f64_scalar:
; GFX11: ; %bb.0:
@@ -15908,7 +15422,7 @@ define inreg <9 x double> @bitcast_v9i64_to_v9f64_scalar(<9 x i64> inreg %a, i32
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-NEXT: s_cmp_lg_u32 s4, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
@@ -15942,8 +15456,6 @@ define inreg <9 x double> @bitcast_v9i64_to_v9f64_scalar(<9 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_4:
-; GFX11-NEXT: s_branch .LBB37_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16105,9 +15617,9 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB39_3
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_4
+; SI-NEXT: s_cbranch_execnz .LBB39_3
; SI-NEXT: .LBB39_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -16118,10 +15630,8 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; SI-NEXT: v_add_f64 v[12:13], s[48:49], 1.0
; SI-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
; SI-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
-; SI-NEXT: s_branch .LBB39_5
+; SI-NEXT: s_branch .LBB39_4
; SI-NEXT: .LBB39_3:
-; SI-NEXT: s_branch .LBB39_2
-; SI-NEXT: .LBB39_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -16154,7 +15664,7 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB39_5: ; %end
+; SI-NEXT: .LBB39_4: ; %end
; SI-NEXT: v_readlane_b32 s53, v32, 9
; SI-NEXT: v_readlane_b32 s52, v32, 8
; SI-NEXT: v_readlane_b32 s51, v32, 7
@@ -16207,9 +15717,9 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB39_3
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_4
+; VI-NEXT: s_cbranch_execnz .LBB39_3
; VI-NEXT: .LBB39_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -16220,10 +15730,8 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; VI-NEXT: v_add_f64 v[12:13], s[48:49], 1.0
; VI-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
; VI-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
-; VI-NEXT: s_branch .LBB39_5
+; VI-NEXT: s_branch .LBB39_4
; VI-NEXT: .LBB39_3:
-; VI-NEXT: s_branch .LBB39_2
-; VI-NEXT: .LBB39_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -16256,7 +15764,7 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB39_5: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: v_readlane_b32 s53, v32, 9
; VI-NEXT: v_readlane_b32 s52, v32, 8
; VI-NEXT: v_readlane_b32 s51, v32, 7
@@ -16309,9 +15817,9 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
+; GFX9-NEXT: s_cbranch_execnz .LBB39_3
; GFX9-NEXT: .LBB39_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -16322,10 +15830,8 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; GFX9-NEXT: v_add_f64 v[12:13], s[48:49], 1.0
; GFX9-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
; GFX9-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
-; GFX9-NEXT: s_branch .LBB39_5
+; GFX9-NEXT: s_branch .LBB39_4
; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
-; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -16358,7 +15864,7 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB39_5: ; %end
+; GFX9-NEXT: .LBB39_4: ; %end
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
; GFX9-NEXT: v_readlane_b32 s52, v32, 8
; GFX9-NEXT: v_readlane_b32 s51, v32, 7
@@ -16412,10 +15918,10 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s52, s28
; GFX11-NEXT: v_writelane_b32 v32, s53, 9
; GFX11-NEXT: s_mov_b32 s53, s29
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_3
; GFX11-NEXT: .LBB39_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -16426,10 +15932,8 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; GFX11-NEXT: v_add_f64 v[12:13], s[48:49], 1.0
; GFX11-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
; GFX11-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
-; GFX11-NEXT: s_branch .LBB39_5
+; GFX11-NEXT: s_branch .LBB39_4
; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
-; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -16446,7 +15950,7 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB39_5: ; %end
+; GFX11-NEXT: .LBB39_4: ; %end
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
; GFX11-NEXT: v_readlane_b32 s52, v32, 8
; GFX11-NEXT: v_readlane_b32 s51, v32, 7
@@ -17029,7 +16533,7 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s7, v1
; SI-NEXT: s_cmp_lg_u32 s6, 0
; SI-NEXT: v_readfirstlane_b32 s6, v0
-; SI-NEXT: s_cbranch_scc0 .LBB41_4
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s60, s5, 16
; SI-NEXT: s_lshr_b32 s61, s7, 16
@@ -17161,26 +16665,6 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v16, s4
; SI-NEXT: v_mov_b32_e32 v17, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB41_4:
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: s_branch .LBB41_2
;
; VI-LABEL: bitcast_v9i64_to_v36i16_scalar:
; VI: ; %bb.0:
@@ -17191,7 +16675,7 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s8, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s9, v0
-; VI-NEXT: s_cbranch_scc0 .LBB41_4
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s10, s6, 16
; VI-NEXT: s_lshr_b32 s11, s7, 16
@@ -17323,26 +16807,6 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v16, s7
; VI-NEXT: v_mov_b32_e32 v17, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_4:
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB41_2
;
; GFX9-LABEL: bitcast_v9i64_to_v36i16_scalar:
; GFX9: ; %bb.0:
@@ -17353,7 +16817,7 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s8, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s9, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s10, s6, 16
; GFX9-NEXT: s_lshr_b32 s11, s7, 16
@@ -17449,26 +16913,6 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v16, s7
; GFX9-NEXT: v_mov_b32_e32 v17, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_4:
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB41_2
;
; GFX11-LABEL: bitcast_v9i64_to_v36i16_scalar:
; GFX11: ; %bb.0:
@@ -17476,7 +16920,7 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-NEXT: s_mov_b32 s46, 0
; GFX11-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s4, s29, 16
; GFX11-NEXT: s_lshr_b32 s5, s28, 16
@@ -17565,26 +17009,6 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v14, s7 :: v_dual_mov_b32 v15, s6
; GFX11-NEXT: v_dual_mov_b32 v16, s5 :: v_dual_mov_b32 v17, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_4:
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: s_branch .LBB41_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -18391,7 +17815,7 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; SI-NEXT: v_writelane_b32 v18, s66, 14
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v18, s67, 15
-; SI-NEXT: s_cbranch_scc0 .LBB43_4
+; SI-NEXT: s_cbranch_scc0 .LBB43_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s91, 16
@@ -18579,9 +18003,6 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB43_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB43_2
;
; VI-LABEL: bitcast_v36i16_to_v9i64_scalar:
; VI: ; %bb.0:
@@ -18629,7 +18050,7 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; VI-NEXT: v_writelane_b32 v18, s66, 14
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_writelane_b32 v18, s67, 15
-; VI-NEXT: s_cbranch_scc0 .LBB43_4
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s91, 16
@@ -18817,9 +18238,6 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB43_2
;
; GFX9-LABEL: bitcast_v36i16_to_v9i64_scalar:
; GFX9: ; %bb.0:
@@ -18879,9 +18297,9 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; GFX9-NEXT: s_pack_ll_b32_b16 s51, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s52, s58, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s53, s56, s57
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
+; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -18901,10 +18319,8 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v15, s51, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v16, s52, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v17, s53, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB43_5
+; GFX9-NEXT: s_branch .LBB43_4
; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
-; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -18937,7 +18353,7 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB43_5: ; %end
+; GFX9-NEXT: .LBB43_4: ; %end
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
; GFX9-NEXT: v_readlane_b32 s52, v32, 8
; GFX9-NEXT: v_readlane_b32 s51, v32, 7
@@ -18996,10 +18412,10 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s27, s15
; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-NEXT: .LBB43_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -19021,8 +18437,6 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
-; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -19607,7 +19021,7 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s7, v1
; SI-NEXT: s_cmp_lg_u32 s6, 0
; SI-NEXT: v_readfirstlane_b32 s6, v0
-; SI-NEXT: s_cbranch_scc0 .LBB45_4
+; SI-NEXT: s_cbranch_scc0 .LBB45_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s60, s5, 16
; SI-NEXT: s_lshr_b32 s61, s7, 16
@@ -19739,26 +19153,6 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v16, s4
; SI-NEXT: v_mov_b32_e32 v17, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB45_4:
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: s_branch .LBB45_2
;
; VI-LABEL: bitcast_v9i64_to_v36f16_scalar:
; VI: ; %bb.0:
@@ -19769,7 +19163,7 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: v_readfirstlane_b32 s8, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s9, v0
-; VI-NEXT: s_cbranch_scc0 .LBB45_4
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s10, s6, 16
; VI-NEXT: s_lshr_b32 s11, s7, 16
@@ -19901,26 +19295,6 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v16, s7
; VI-NEXT: v_mov_b32_e32 v17, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_4:
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB45_2
;
; GFX9-LABEL: bitcast_v9i64_to_v36f16_scalar:
; GFX9: ; %bb.0:
@@ -19931,7 +19305,7 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: v_readfirstlane_b32 s8, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s9, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s10, s6, 16
; GFX9-NEXT: s_lshr_b32 s11, s7, 16
@@ -20027,26 +19401,6 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v16, s7
; GFX9-NEXT: v_mov_b32_e32 v17, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_4:
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB45_2
;
; GFX11-LABEL: bitcast_v9i64_to_v36f16_scalar:
; GFX11: ; %bb.0:
@@ -20054,7 +19408,7 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-NEXT: s_mov_b32 s46, 0
; GFX11-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s4, s29, 16
; GFX11-NEXT: s_lshr_b32 s5, s28, 16
@@ -20143,26 +19497,6 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v14, s7 :: v_dual_mov_b32 v15, s6
; GFX11-NEXT: v_dual_mov_b32 v16, s5 :: v_dual_mov_b32 v17, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_4:
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: s_branch .LBB45_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -21043,7 +20377,7 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; SI-NEXT: v_writelane_b32 v32, s66, 14
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v32, s67, 15
-; SI-NEXT: s_cbranch_scc0 .LBB47_3
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s91, 16
@@ -21099,7 +20433,7 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s53, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB47_4
+; SI-NEXT: s_cbranch_execnz .LBB47_3
; SI-NEXT: .LBB47_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s91
; SI-NEXT: v_cvt_f32_f16_e32 v2, s90
@@ -21245,11 +20579,8 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_or_b32_e32 v17, v18, v17
-; SI-NEXT: s_branch .LBB47_5
+; SI-NEXT: s_branch .LBB47_4
; SI-NEXT: .LBB47_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB47_2
-; SI-NEXT: .LBB47_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -21282,7 +20613,7 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB47_5: ; %end
+; SI-NEXT: .LBB47_4: ; %end
; SI-NEXT: v_readlane_b32 s67, v32, 15
; SI-NEXT: v_readlane_b32 s66, v32, 14
; SI-NEXT: v_readlane_b32 s65, v32, 13
@@ -21351,7 +20682,7 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; VI-NEXT: v_writelane_b32 v32, s66, 14
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_writelane_b32 v32, s67, 15
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s91, 16
@@ -21407,7 +20738,7 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s53, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB47_4
+; VI-NEXT: s_cbranch_execnz .LBB47_3
; VI-NEXT: .LBB47_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v17, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s91
@@ -21482,11 +20813,8 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; VI-NEXT: v_add_f16_sdwa v18, v18, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v17, s6, v17
; VI-NEXT: v_or_b32_e32 v17, v17, v18
-; VI-NEXT: s_branch .LBB47_5
+; VI-NEXT: s_branch .LBB47_4
; VI-NEXT: .LBB47_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB47_2
-; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -21519,7 +20847,7 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB47_5: ; %end
+; VI-NEXT: .LBB47_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v32, 15
; VI-NEXT: v_readlane_b32 s66, v32, 14
; VI-NEXT: v_readlane_b32 s65, v32, 13
@@ -21600,9 +20928,9 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; GFX9-NEXT: s_pack_ll_b32_b16 s51, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s52, s58, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s53, s56, s57
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
+; GFX9-NEXT: s_cbranch_execnz .LBB47_3
; GFX9-NEXT: .LBB47_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v17, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v17 op_sel_hi:[1,0]
@@ -21623,10 +20951,8 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v15, s51, v17 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v16, s52, v17 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v17, s53, v17 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB47_5
+; GFX9-NEXT: s_branch .LBB47_4
; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
-; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -21659,7 +20985,7 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB47_5: ; %end
+; GFX9-NEXT: .LBB47_4: ; %end
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
; GFX9-NEXT: v_readlane_b32 s52, v32, 8
; GFX9-NEXT: v_readlane_b32 s51, v32, 7
@@ -21718,10 +21044,10 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s27, s15
; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-NEXT: .LBB47_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -21743,8 +21069,6 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -22274,7 +21598,7 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s5, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v0
-; SI-NEXT: s_cbranch_scc0 .LBB49_3
+; SI-NEXT: s_cbranch_scc0 .LBB49_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s76, s7, 16
; SI-NEXT: s_lshr_b32 s75, s5, 16
@@ -22294,7 +21618,7 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; SI-NEXT: s_lshr_b64 s[44:45], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB49_4
+; SI-NEXT: s_cbranch_execnz .LBB49_3
; SI-NEXT: .LBB49_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[16:17], s[6:7], 1.0
; SI-NEXT: v_add_f64 v[14:15], s[4:5], 1.0
@@ -22323,28 +21647,8 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v1
-; SI-NEXT: s_branch .LBB49_5
+; SI-NEXT: s_branch .LBB49_4
; SI-NEXT: .LBB49_3:
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: s_branch .LBB49_2
-; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v17, s7
@@ -22381,7 +21685,7 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v20, s12
; SI-NEXT: v_mov_b32_e32 v19, s10
; SI-NEXT: v_mov_b32_e32 v18, s8
-; SI-NEXT: .LBB49_5: ; %end
+; SI-NEXT: .LBB49_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
@@ -22447,7 +21751,7 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s5, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s4, v0
-; VI-NEXT: s_cbranch_scc0 .LBB49_3
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s10, s7, 16
; VI-NEXT: s_lshr_b32 s43, s6, 16
@@ -22467,7 +21771,7 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; VI-NEXT: s_lshr_b32 s58, s18, 16
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s59, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB49_4
+; VI-NEXT: s_cbranch_execnz .LBB49_3
; VI-NEXT: .LBB49_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[16:17], s[6:7], 1.0
; VI-NEXT: v_add_f64 v[14:15], s[4:5], 1.0
@@ -22496,28 +21800,8 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; VI-NEXT: s_branch .LBB49_5
+; VI-NEXT: s_branch .LBB49_4
; VI-NEXT: .LBB49_3:
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB49_2
-; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -22554,7 +21838,7 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v24, s12
; VI-NEXT: v_mov_b32_e32 v22, s11
; VI-NEXT: v_mov_b32_e32 v20, s10
-; VI-NEXT: .LBB49_5: ; %end
+; VI-NEXT: .LBB49_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v33, 16, v33
; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31
; VI-NEXT: v_lshlrev_b32_e32 v29, 16, v29
@@ -22602,7 +21886,7 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s5, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s10, s7, 16
; GFX9-NEXT: s_lshr_b32 s43, s6, 16
@@ -22622,7 +21906,7 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s58, s18, 16
; GFX9-NEXT: s_lshr_b32 s42, s17, 16
; GFX9-NEXT: s_lshr_b32 s59, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
+; GFX9-NEXT: s_cbranch_execnz .LBB49_3
; GFX9-NEXT: .LBB49_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[16:17], s[6:7], 1.0
; GFX9-NEXT: v_add_f64 v[14:15], s[4:5], 1.0
@@ -22651,28 +21935,8 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; GFX9-NEXT: s_branch .LBB49_5
+; GFX9-NEXT: s_branch .LBB49_4
; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB49_2
-; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -22709,7 +21973,7 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v24, s12
; GFX9-NEXT: v_mov_b32_e32 v22, s11
; GFX9-NEXT: v_mov_b32_e32 v20, s10
-; GFX9-NEXT: .LBB49_5: ; %end
+; GFX9-NEXT: .LBB49_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -22754,7 +22018,7 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s4, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
@@ -22775,7 +22039,7 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], s[28:29], 1.0
; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
@@ -22804,28 +22068,8 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB49_5
+; GFX11-TRUE16-NEXT: s_branch .LBB49_4
; GFX11-TRUE16-NEXT: .LBB49_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr5
-; GFX11-TRUE16-NEXT: s_branch .LBB49_2
-; GFX11-TRUE16-NEXT: .LBB49_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -22844,7 +22088,7 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s40 :: v_dual_mov_b32 v23, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s15 :: v_dual_mov_b32 v21, s6
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, s14 :: v_dual_mov_b32 v19, s5
-; GFX11-TRUE16-NEXT: .LBB49_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB49_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, v32 :: v_dual_mov_b32 v31, v31
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v22, v22
@@ -22884,7 +22128,7 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s28, 16
@@ -22905,7 +22149,7 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f64 v[13:14], s[28:29], 1.0
; GFX11-FAKE16-NEXT: v_add_f64 v[15:16], s[26:27], 1.0
@@ -22934,28 +22178,8 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v19
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v1
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v0
-; GFX11-FAKE16-NEXT: s_branch .LBB49_5
+; GFX11-FAKE16-NEXT: s_branch .LBB49_4
; GFX11-FAKE16-NEXT: .LBB49_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr5
-; GFX11-FAKE16-NEXT: s_branch .LBB49_2
-; GFX11-FAKE16-NEXT: .LBB49_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v19, s2
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v10, s20
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s18 :: v_dual_mov_b32 v8, s22
@@ -22974,7 +22198,7 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s11 :: v_dual_mov_b32 v29, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s8 :: v_dual_mov_b32 v25, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s6 :: v_dual_mov_b32 v23, s5
-; GFX11-FAKE16-NEXT: .LBB49_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB49_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
@@ -23818,7 +23042,7 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; SI-NEXT: v_writelane_b32 v18, s66, 14
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v18, s67, 15
-; SI-NEXT: s_cbranch_scc0 .LBB51_4
+; SI-NEXT: s_cbranch_scc0 .LBB51_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s91, 16
@@ -24006,9 +23230,6 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB51_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB51_2
;
; VI-LABEL: bitcast_v36i16_to_v9f64_scalar:
; VI: ; %bb.0:
@@ -24056,7 +23277,7 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; VI-NEXT: v_writelane_b32 v18, s66, 14
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_writelane_b32 v18, s67, 15
-; VI-NEXT: s_cbranch_scc0 .LBB51_4
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s91, 16
@@ -24244,9 +23465,6 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB51_2
;
; GFX9-LABEL: bitcast_v36i16_to_v9f64_scalar:
; GFX9: ; %bb.0:
@@ -24306,9 +23524,9 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s51, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s52, s58, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s53, s56, s57
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_4
+; GFX9-NEXT: s_cbranch_execnz .LBB51_3
; GFX9-NEXT: .LBB51_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -24328,10 +23546,8 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v15, s51, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v16, s52, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v17, s53, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB51_5
+; GFX9-NEXT: s_branch .LBB51_4
; GFX9-NEXT: .LBB51_3:
-; GFX9-NEXT: s_branch .LBB51_2
-; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -24364,7 +23580,7 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB51_5: ; %end
+; GFX9-NEXT: .LBB51_4: ; %end
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
; GFX9-NEXT: v_readlane_b32 s52, v32, 8
; GFX9-NEXT: v_readlane_b32 s51, v32, 7
@@ -24423,10 +23639,10 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s27, s15
; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-NEXT: .LBB51_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -24448,8 +23664,6 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
-; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -24979,7 +24193,7 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s5, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v0
-; SI-NEXT: s_cbranch_scc0 .LBB53_3
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s76, s7, 16
; SI-NEXT: s_lshr_b32 s75, s5, 16
@@ -24999,7 +24213,7 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; SI-NEXT: s_lshr_b64 s[44:45], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB53_4
+; SI-NEXT: s_cbranch_execnz .LBB53_3
; SI-NEXT: .LBB53_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[16:17], s[6:7], 1.0
; SI-NEXT: v_add_f64 v[14:15], s[4:5], 1.0
@@ -25028,28 +24242,8 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v1
-; SI-NEXT: s_branch .LBB53_5
+; SI-NEXT: s_branch .LBB53_4
; SI-NEXT: .LBB53_3:
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr63
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: s_branch .LBB53_2
-; SI-NEXT: .LBB53_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v17, s7
@@ -25086,7 +24280,7 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v20, s12
; SI-NEXT: v_mov_b32_e32 v19, s10
; SI-NEXT: v_mov_b32_e32 v18, s8
-; SI-NEXT: .LBB53_5: ; %end
+; SI-NEXT: .LBB53_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
@@ -25152,7 +24346,7 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s5, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s4, v0
-; VI-NEXT: s_cbranch_scc0 .LBB53_3
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s10, s7, 16
; VI-NEXT: s_lshr_b32 s43, s6, 16
@@ -25172,7 +24366,7 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; VI-NEXT: s_lshr_b32 s58, s18, 16
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s59, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB53_4
+; VI-NEXT: s_cbranch_execnz .LBB53_3
; VI-NEXT: .LBB53_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[16:17], s[6:7], 1.0
; VI-NEXT: v_add_f64 v[14:15], s[4:5], 1.0
@@ -25201,28 +24395,8 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; VI-NEXT: s_branch .LBB53_5
+; VI-NEXT: s_branch .LBB53_4
; VI-NEXT: .LBB53_3:
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB53_2
-; VI-NEXT: .LBB53_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -25259,7 +24433,7 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v24, s12
; VI-NEXT: v_mov_b32_e32 v22, s11
; VI-NEXT: v_mov_b32_e32 v20, s10
-; VI-NEXT: .LBB53_5: ; %end
+; VI-NEXT: .LBB53_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v33, 16, v33
; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31
; VI-NEXT: v_lshlrev_b32_e32 v29, 16, v29
@@ -25307,7 +24481,7 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s5, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s10, s7, 16
; GFX9-NEXT: s_lshr_b32 s43, s6, 16
@@ -25327,7 +24501,7 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX9-NEXT: s_lshr_b32 s58, s18, 16
; GFX9-NEXT: s_lshr_b32 s42, s17, 16
; GFX9-NEXT: s_lshr_b32 s59, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB53_4
+; GFX9-NEXT: s_cbranch_execnz .LBB53_3
; GFX9-NEXT: .LBB53_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[16:17], s[6:7], 1.0
; GFX9-NEXT: v_add_f64 v[14:15], s[4:5], 1.0
@@ -25356,28 +24530,8 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; GFX9-NEXT: s_branch .LBB53_5
+; GFX9-NEXT: s_branch .LBB53_4
; GFX9-NEXT: .LBB53_3:
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB53_2
-; GFX9-NEXT: .LBB53_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -25414,7 +24568,7 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v24, s12
; GFX9-NEXT: v_mov_b32_e32 v22, s11
; GFX9-NEXT: v_mov_b32_e32 v20, s10
-; GFX9-NEXT: .LBB53_5: ; %end
+; GFX9-NEXT: .LBB53_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -25459,7 +24613,7 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s4, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
@@ -25480,7 +24634,7 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], s[28:29], 1.0
; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
@@ -25509,28 +24663,8 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB53_5
+; GFX11-TRUE16-NEXT: s_branch .LBB53_4
; GFX11-TRUE16-NEXT: .LBB53_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr5
-; GFX11-TRUE16-NEXT: s_branch .LBB53_2
-; GFX11-TRUE16-NEXT: .LBB53_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -25549,7 +24683,7 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s40 :: v_dual_mov_b32 v23, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s15 :: v_dual_mov_b32 v21, s6
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, s14 :: v_dual_mov_b32 v19, s5
-; GFX11-TRUE16-NEXT: .LBB53_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB53_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, v32 :: v_dual_mov_b32 v31, v31
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v22, v22
@@ -25589,7 +24723,7 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s28, 16
@@ -25610,7 +24744,7 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f64 v[13:14], s[28:29], 1.0
; GFX11-FAKE16-NEXT: v_add_f64 v[15:16], s[26:27], 1.0
@@ -25639,28 +24773,8 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v19
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v1
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v0
-; GFX11-FAKE16-NEXT: s_branch .LBB53_5
+; GFX11-FAKE16-NEXT: s_branch .LBB53_4
; GFX11-FAKE16-NEXT: .LBB53_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr5
-; GFX11-FAKE16-NEXT: s_branch .LBB53_2
-; GFX11-FAKE16-NEXT: .LBB53_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v19, s2
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v10, s20
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s18 :: v_dual_mov_b32 v8, s22
@@ -25679,7 +24793,7 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s11 :: v_dual_mov_b32 v29, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s8 :: v_dual_mov_b32 v25, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s6 :: v_dual_mov_b32 v23, s5
-; GFX11-FAKE16-NEXT: .LBB53_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB53_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
@@ -26597,7 +25711,7 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; SI-NEXT: v_writelane_b32 v32, s66, 14
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v32, s67, 15
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s91, 16
@@ -26653,7 +25767,7 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s53, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB55_4
+; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s91
; SI-NEXT: v_cvt_f32_f16_e32 v2, s90
@@ -26799,11 +25913,8 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_or_b32_e32 v17, v18, v17
-; SI-NEXT: s_branch .LBB55_5
+; SI-NEXT: s_branch .LBB55_4
; SI-NEXT: .LBB55_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB55_2
-; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -26836,7 +25947,7 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB55_5: ; %end
+; SI-NEXT: .LBB55_4: ; %end
; SI-NEXT: v_readlane_b32 s67, v32, 15
; SI-NEXT: v_readlane_b32 s66, v32, 14
; SI-NEXT: v_readlane_b32 s65, v32, 13
@@ -26905,7 +26016,7 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; VI-NEXT: v_writelane_b32 v32, s66, 14
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_writelane_b32 v32, s67, 15
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s91, 16
@@ -26961,7 +26072,7 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s53, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB55_4
+; VI-NEXT: s_cbranch_execnz .LBB55_3
; VI-NEXT: .LBB55_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v17, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s91
@@ -27036,11 +26147,8 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; VI-NEXT: v_add_f16_sdwa v18, v18, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v17, s6, v17
; VI-NEXT: v_or_b32_e32 v17, v17, v18
-; VI-NEXT: s_branch .LBB55_5
+; VI-NEXT: s_branch .LBB55_4
; VI-NEXT: .LBB55_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB55_2
-; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -27073,7 +26181,7 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB55_5: ; %end
+; VI-NEXT: .LBB55_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v32, 15
; VI-NEXT: v_readlane_b32 s66, v32, 14
; VI-NEXT: v_readlane_b32 s65, v32, 13
@@ -27154,9 +26262,9 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s51, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s52, s58, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s53, s56, s57
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
+; GFX9-NEXT: s_cbranch_execnz .LBB55_3
; GFX9-NEXT: .LBB55_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v17, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v17 op_sel_hi:[1,0]
@@ -27177,10 +26285,8 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v15, s51, v17 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v16, s52, v17 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v17, s53, v17 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB55_5
+; GFX9-NEXT: s_branch .LBB55_4
; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
-; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -27213,7 +26319,7 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB55_5: ; %end
+; GFX9-NEXT: .LBB55_4: ; %end
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
; GFX9-NEXT: v_readlane_b32 s52, v32, 8
; GFX9-NEXT: v_readlane_b32 s51, v32, 7
@@ -27272,10 +26378,10 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s27, s15
; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-NEXT: .LBB55_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -27297,8 +26403,6 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
-; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -28135,7 +27239,7 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; SI-NEXT: s_lshr_b32 s51, s52, 16
; SI-NEXT: v_readfirstlane_b32 s4, v4
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB57_4
+; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s5, s17, 0xffff
; SI-NEXT: s_lshl_b32 s7, s90, 16
@@ -28413,26 +27517,6 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB57_4:
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: s_branch .LBB57_2
;
; VI-LABEL: bitcast_v36i16_to_v36f16_scalar:
; VI: ; %bb.0:
@@ -28461,7 +27545,7 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s41, 16
; VI-NEXT: v_readfirstlane_b32 s4, v4
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB57_3
; VI-NEXT: .LBB57_2: ; %cmp.true
@@ -28575,8 +27659,6 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v16, s11
; VI-NEXT: v_mov_b32_e32 v17, s7
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v36i16_to_v36f16_scalar:
; GFX9: ; %bb.0:
@@ -28605,9 +27687,9 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s44, s56, 16
; GFX9-NEXT: v_readfirstlane_b32 s4, v4
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
+; GFX9-NEXT: s_cbranch_execnz .LBB57_3
; GFX9-NEXT: .LBB57_2: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s59, s47
; GFX9-NEXT: v_pk_add_u16 v17, s4, 3 op_sel_hi:[1,0]
@@ -28663,10 +27745,8 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v20, 16, v15
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v16
; GFX9-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX9-NEXT: s_branch .LBB57_5
+; GFX9-NEXT: s_branch .LBB57_4
; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
-; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v17, s59
; GFX9-NEXT: v_mov_b32_e32 v16, s58
; GFX9-NEXT: v_mov_b32_e32 v15, s57
@@ -28703,7 +27783,7 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v33, s8
; GFX9-NEXT: v_mov_b32_e32 v34, s7
; GFX9-NEXT: v_mov_b32_e32 v35, s6
-; GFX9-NEXT: .LBB57_5: ; %end
+; GFX9-NEXT: .LBB57_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -28766,10 +27846,10 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s0, 16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s46, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
@@ -28825,10 +27905,8 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-TRUE16-NEXT: s_branch .LBB57_5
+; GFX11-TRUE16-NEXT: s_branch .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB57_2
-; GFX11-TRUE16-NEXT: .LBB57_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
@@ -28847,7 +27925,7 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s9 :: v_dual_mov_b32 v31, s6
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s8 :: v_dual_mov_b32 v33, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s4 :: v_dual_mov_b32 v35, s5
-; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB57_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
@@ -28902,10 +27980,10 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s46, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s46, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-FAKE16-NEXT: .LBB57_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
@@ -28961,10 +28039,8 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v14
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v13
-; GFX11-FAKE16-NEXT: s_branch .LBB57_5
+; GFX11-FAKE16-NEXT: s_branch .LBB57_4
; GFX11-FAKE16-NEXT: .LBB57_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB57_2
-; GFX11-FAKE16-NEXT: .LBB57_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s25 :: v_dual_mov_b32 v8, s24
@@ -28983,7 +28059,7 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s9 :: v_dual_mov_b32 v31, s6
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s8 :: v_dual_mov_b32 v33, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s4 :: v_dual_mov_b32 v35, s5
-; GFX11-FAKE16-NEXT: .LBB57_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB57_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v0
@@ -29672,9 +28748,9 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 ; 4-byte Folded Spill
-; SI-NEXT: s_cbranch_scc0 .LBB59_3
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_4
+; SI-NEXT: s_cbranch_execnz .LBB59_3
; SI-NEXT: .LBB59_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s59
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -29830,10 +28906,8 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; SI-NEXT: v_lshr_b64 v[22:23], v[12:13], 16
; SI-NEXT: v_lshr_b64 v[20:21], v[14:15], 16
; SI-NEXT: v_lshr_b64 v[18:19], v[16:17], 16
-; SI-NEXT: s_branch .LBB59_5
+; SI-NEXT: s_branch .LBB59_4
; SI-NEXT: .LBB59_3:
-; SI-NEXT: s_branch .LBB59_2
-; SI-NEXT: .LBB59_4:
; SI-NEXT: v_mov_b32_e32 v37, s47
; SI-NEXT: v_mov_b32_e32 v51, s41
; SI-NEXT: v_mov_b32_e32 v38, s40
@@ -29874,7 +28948,7 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v22, s43
; SI-NEXT: v_mov_b32_e32 v20, s44
; SI-NEXT: v_mov_b32_e32 v18, s42
-; SI-NEXT: .LBB59_5: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v34
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v36
; SI-NEXT: v_or_b32_e32 v0, v2, v0
@@ -29965,9 +29039,9 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s59, s58, 16
; VI-NEXT: v_readfirstlane_b32 s4, v4
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
+; VI-NEXT: s_cbranch_execnz .LBB59_3
; VI-NEXT: .LBB59_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v18, 0x200
; VI-NEXT: v_add_f16_e32 v0, s16, v18
@@ -30006,10 +29080,8 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v19, s47, v18
; VI-NEXT: v_add_f16_e32 v17, s44, v18
; VI-NEXT: v_add_f16_e32 v18, s45, v18
-; VI-NEXT: s_branch .LBB59_5
+; VI-NEXT: s_branch .LBB59_4
; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
-; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v18, s45
; VI-NEXT: v_mov_b32_e32 v17, s44
; VI-NEXT: v_mov_b32_e32 v19, s47
@@ -30046,7 +29118,7 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v35, s43
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: .LBB59_5: ; %end
+; VI-NEXT: .LBB59_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v35, 16, v35
; VI-NEXT: v_lshlrev_b32_e32 v34, 16, v34
; VI-NEXT: v_lshlrev_b32_e32 v33, 16, v33
@@ -30112,9 +29184,9 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s44, s56, 16
; GFX9-NEXT: v_readfirstlane_b32 s4, v4
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
+; GFX9-NEXT: s_cbranch_execnz .LBB59_3
; GFX9-NEXT: .LBB59_2: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s59, s47
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
@@ -30171,10 +29243,8 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v20, 16, v15
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v16
; GFX9-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX9-NEXT: s_branch .LBB59_5
+; GFX9-NEXT: s_branch .LBB59_4
; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
-; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v17, s59
; GFX9-NEXT: v_mov_b32_e32 v16, s58
; GFX9-NEXT: v_mov_b32_e32 v15, s57
@@ -30211,7 +29281,7 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v33, s8
; GFX9-NEXT: v_mov_b32_e32 v34, s7
; GFX9-NEXT: v_mov_b32_e32 v35, s6
-; GFX9-NEXT: .LBB59_5: ; %end
+; GFX9-NEXT: .LBB59_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -30274,10 +29344,10 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s0, 16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s46, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
@@ -30333,10 +29403,8 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-TRUE16-NEXT: s_branch .LBB59_5
+; GFX11-TRUE16-NEXT: s_branch .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB59_2
-; GFX11-TRUE16-NEXT: .LBB59_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
@@ -30355,7 +29423,7 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s9 :: v_dual_mov_b32 v31, s6
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s8 :: v_dual_mov_b32 v33, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s4 :: v_dual_mov_b32 v35, s5
-; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB59_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
@@ -30410,10 +29478,10 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s46, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s46, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-FAKE16-NEXT: .LBB59_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
@@ -30469,10 +29537,8 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v14
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v13
-; GFX11-FAKE16-NEXT: s_branch .LBB59_5
+; GFX11-FAKE16-NEXT: s_branch .LBB59_4
; GFX11-FAKE16-NEXT: .LBB59_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB59_2
-; GFX11-FAKE16-NEXT: .LBB59_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s25 :: v_dual_mov_b32 v8, s24
@@ -30491,7 +29557,7 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s9 :: v_dual_mov_b32 v31, s6
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s8 :: v_dual_mov_b32 v33, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s4 :: v_dual_mov_b32 v35, s5
-; GFX11-FAKE16-NEXT: .LBB59_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB59_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v0
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
index d8c9c9a11cc2e..b70a2127d17f0 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
@@ -168,7 +168,7 @@ define inreg <20 x float> @bitcast_v20i32_to_v20f32_scalar(<20 x i32> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s10, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s11, v0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -214,8 +214,6 @@ define inreg <20 x float> @bitcast_v20i32_to_v20f32_scalar(<20 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v18, s7
; SI-NEXT: v_mov_b32_e32 v19, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v20i32_to_v20f32_scalar:
; VI: ; %bb.0:
@@ -228,7 +226,7 @@ define inreg <20 x float> @bitcast_v20i32_to_v20f32_scalar(<20 x i32> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s10, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s11, v0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -274,8 +272,6 @@ define inreg <20 x float> @bitcast_v20i32_to_v20f32_scalar(<20 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v18, s7
; VI-NEXT: v_mov_b32_e32 v19, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v20i32_to_v20f32_scalar:
; GFX9: ; %bb.0:
@@ -288,7 +284,7 @@ define inreg <20 x float> @bitcast_v20i32_to_v20f32_scalar(<20 x i32> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -334,8 +330,6 @@ define inreg <20 x float> @bitcast_v20i32_to_v20f32_scalar(<20 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v18, s7
; GFX9-NEXT: v_mov_b32_e32 v19, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v20i32_to_v20f32_scalar:
; GFX11: ; %bb.0:
@@ -345,7 +339,7 @@ define inreg <20 x float> @bitcast_v20i32_to_v20f32_scalar(<20 x i32> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s5, v0
; GFX11-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -383,8 +377,6 @@ define inreg <20 x float> @bitcast_v20i32_to_v20f32_scalar(<20 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: v_dual_mov_b32 v18, s5 :: v_dual_mov_b32 v19, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -584,9 +576,9 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v19, s55, 1.0
; SI-NEXT: v_add_f32_e64 v18, s54, 1.0
@@ -608,10 +600,8 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB3_5
+; SI-NEXT: s_branch .LBB3_4
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -644,7 +634,7 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB3_5: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -703,9 +693,9 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v19, s55, 1.0
; VI-NEXT: v_add_f32_e64 v18, s54, 1.0
@@ -727,10 +717,8 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB3_5
+; VI-NEXT: s_branch .LBB3_4
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -763,7 +751,7 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB3_5: ; %end
+; VI-NEXT: .LBB3_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -822,9 +810,9 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v19, s55, 1.0
; GFX9-NEXT: v_add_f32_e64 v18, s54, 1.0
@@ -846,10 +834,8 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB3_5
+; GFX9-NEXT: s_branch .LBB3_4
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -882,7 +868,7 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB3_5: ; %end
+; GFX9-NEXT: .LBB3_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -942,10 +928,10 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v19, s55, 1.0
; GFX11-NEXT: v_add_f32_e64 v18, s54, 1.0
@@ -967,10 +953,8 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB3_5
+; GFX11-NEXT: s_branch .LBB3_4
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -987,7 +971,7 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB3_5: ; %end
+; GFX11-NEXT: .LBB3_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -1184,7 +1168,7 @@ define inreg <10 x i64> @bitcast_v20i32_to_v10i64_scalar(<20 x i32> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s10, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s11, v0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB5_3
; SI-NEXT: .LBB5_2: ; %cmp.true
@@ -1230,8 +1214,6 @@ define inreg <10 x i64> @bitcast_v20i32_to_v10i64_scalar(<20 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v18, s7
; SI-NEXT: v_mov_b32_e32 v19, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v20i32_to_v10i64_scalar:
; VI: ; %bb.0:
@@ -1244,7 +1226,7 @@ define inreg <10 x i64> @bitcast_v20i32_to_v10i64_scalar(<20 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s10, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s11, v0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -1290,8 +1272,6 @@ define inreg <10 x i64> @bitcast_v20i32_to_v10i64_scalar(<20 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v18, s7
; VI-NEXT: v_mov_b32_e32 v19, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v20i32_to_v10i64_scalar:
; GFX9: ; %bb.0:
@@ -1304,7 +1284,7 @@ define inreg <10 x i64> @bitcast_v20i32_to_v10i64_scalar(<20 x i32> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -1350,8 +1330,6 @@ define inreg <10 x i64> @bitcast_v20i32_to_v10i64_scalar(<20 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v18, s7
; GFX9-NEXT: v_mov_b32_e32 v19, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v20i32_to_v10i64_scalar:
; GFX11: ; %bb.0:
@@ -1361,7 +1339,7 @@ define inreg <10 x i64> @bitcast_v20i32_to_v10i64_scalar(<20 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s5, v0
; GFX11-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -1399,8 +1377,6 @@ define inreg <10 x i64> @bitcast_v20i32_to_v10i64_scalar(<20 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: v_dual_mov_b32 v18, s5 :: v_dual_mov_b32 v19, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1585,7 +1561,7 @@ define inreg <20 x i32> @bitcast_v10i64_to_v20i32_scalar(<10 x i64> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s10, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s11, v0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB7_3
; SI-NEXT: .LBB7_2: ; %cmp.true
@@ -1631,8 +1607,6 @@ define inreg <20 x i32> @bitcast_v10i64_to_v20i32_scalar(<10 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v18, s7
; SI-NEXT: v_mov_b32_e32 v19, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v10i64_to_v20i32_scalar:
; VI: ; %bb.0:
@@ -1645,7 +1619,7 @@ define inreg <20 x i32> @bitcast_v10i64_to_v20i32_scalar(<10 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s10, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s11, v0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -1691,8 +1665,6 @@ define inreg <20 x i32> @bitcast_v10i64_to_v20i32_scalar(<10 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v18, s7
; VI-NEXT: v_mov_b32_e32 v19, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v10i64_to_v20i32_scalar:
; GFX9: ; %bb.0:
@@ -1705,7 +1677,7 @@ define inreg <20 x i32> @bitcast_v10i64_to_v20i32_scalar(<10 x i64> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
@@ -1751,8 +1723,6 @@ define inreg <20 x i32> @bitcast_v10i64_to_v20i32_scalar(<10 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v18, s7
; GFX9-NEXT: v_mov_b32_e32 v19, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v10i64_to_v20i32_scalar:
; GFX11: ; %bb.0:
@@ -1762,7 +1732,7 @@ define inreg <20 x i32> @bitcast_v10i64_to_v20i32_scalar(<10 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s5, v0
; GFX11-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
@@ -1800,8 +1770,6 @@ define inreg <20 x i32> @bitcast_v10i64_to_v20i32_scalar(<10 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: v_dual_mov_b32 v18, s5 :: v_dual_mov_b32 v19, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1981,7 +1949,7 @@ define inreg <10 x double> @bitcast_v20i32_to_v10f64_scalar(<20 x i32> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s10, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s11, v0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB9_3
; SI-NEXT: .LBB9_2: ; %cmp.true
@@ -2027,8 +1995,6 @@ define inreg <10 x double> @bitcast_v20i32_to_v10f64_scalar(<20 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v18, s7
; SI-NEXT: v_mov_b32_e32 v19, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v20i32_to_v10f64_scalar:
; VI: ; %bb.0:
@@ -2041,7 +2007,7 @@ define inreg <10 x double> @bitcast_v20i32_to_v10f64_scalar(<20 x i32> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s10, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s11, v0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -2087,8 +2053,6 @@ define inreg <10 x double> @bitcast_v20i32_to_v10f64_scalar(<20 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v18, s7
; VI-NEXT: v_mov_b32_e32 v19, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v20i32_to_v10f64_scalar:
; GFX9: ; %bb.0:
@@ -2101,7 +2065,7 @@ define inreg <10 x double> @bitcast_v20i32_to_v10f64_scalar(<20 x i32> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -2147,8 +2111,6 @@ define inreg <10 x double> @bitcast_v20i32_to_v10f64_scalar(<20 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v18, s7
; GFX9-NEXT: v_mov_b32_e32 v19, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v20i32_to_v10f64_scalar:
; GFX11: ; %bb.0:
@@ -2158,7 +2120,7 @@ define inreg <10 x double> @bitcast_v20i32_to_v10f64_scalar(<20 x i32> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s5, v0
; GFX11-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -2196,8 +2158,6 @@ define inreg <10 x double> @bitcast_v20i32_to_v10f64_scalar(<20 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: v_dual_mov_b32 v18, s5 :: v_dual_mov_b32 v19, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2367,9 +2327,9 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
; SI-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
@@ -2381,10 +2341,8 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; SI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; SI-NEXT: s_branch .LBB11_5
+; SI-NEXT: s_branch .LBB11_4
; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -2417,7 +2375,7 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB11_5: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -2476,9 +2434,9 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
; VI-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
@@ -2490,10 +2448,8 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB11_5
+; VI-NEXT: s_branch .LBB11_4
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -2526,7 +2482,7 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB11_5: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -2585,9 +2541,9 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
; GFX9-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
@@ -2599,10 +2555,8 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB11_5
+; GFX9-NEXT: s_branch .LBB11_4
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -2635,7 +2589,7 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB11_5: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -2695,10 +2649,10 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
; GFX11-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
@@ -2710,10 +2664,8 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB11_5
+; GFX11-NEXT: s_branch .LBB11_4
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -2730,7 +2682,7 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB11_5: ; %end
+; GFX11-NEXT: .LBB11_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -3355,7 +3307,7 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s9, v1
; SI-NEXT: s_cmp_lg_u32 s8, 0
; SI-NEXT: v_readfirstlane_b32 s8, v0
-; SI-NEXT: s_cbranch_scc0 .LBB13_4
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s72, s5, 16
; SI-NEXT: s_lshr_b32 s73, s7, 16
@@ -3501,28 +3453,6 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v18, s4
; SI-NEXT: v_mov_b32_e32 v19, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB13_4:
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: s_branch .LBB13_2
;
; VI-LABEL: bitcast_v20i32_to_v40i16_scalar:
; VI: ; %bb.0:
@@ -3535,7 +3465,7 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s10, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s11, v0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s12, s6, 16
; VI-NEXT: s_lshr_b32 s13, s7, 16
@@ -3681,28 +3611,6 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v18, s7
; VI-NEXT: v_mov_b32_e32 v19, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v20i32_to_v40i16_scalar:
; GFX9: ; %bb.0:
@@ -3715,7 +3623,7 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s12, s6, 16
; GFX9-NEXT: s_lshr_b32 s13, s7, 16
@@ -3821,28 +3729,6 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v18, s7
; GFX9-NEXT: v_mov_b32_e32 v19, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v20i32_to_v40i16_scalar:
; GFX11: ; %bb.0:
@@ -3852,7 +3738,7 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s5, v0
; GFX11-NEXT: s_mov_b32 s58, 0
; GFX11-NEXT: s_cmp_lg_u32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s6, s4, 16
; GFX11-NEXT: s_lshr_b32 s7, s5, 16
@@ -3950,28 +3836,6 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v16, s9 :: v_dual_mov_b32 v17, s8
; GFX11-NEXT: v_dual_mov_b32 v18, s5 :: v_dual_mov_b32 v19, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4889,7 +4753,7 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; SI-NEXT: v_writelane_b32 v20, s66, 14
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v20, s67, 15
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s95, 16
@@ -5095,9 +4959,6 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v40i16_to_v20i32_scalar:
; VI: ; %bb.0:
@@ -5153,7 +5014,7 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; VI-NEXT: v_writelane_b32 v20, s66, 18
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_writelane_b32 v20, s67, 19
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s35, 16
@@ -5363,9 +5224,6 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v40i16_to_v20i32_scalar:
; GFX9: ; %bb.0:
@@ -5433,9 +5291,9 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s53, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s54, s58, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s55, s56, s57
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -5457,10 +5315,8 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v17, s53, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v18, s54, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v19, s55, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB15_5
+; GFX9-NEXT: s_branch .LBB15_4
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -5493,7 +5349,7 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB15_5: ; %end
+; GFX9-NEXT: .LBB15_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -5560,10 +5416,10 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
; GFX11-NEXT: s_pack_ll_b32_b16 s18, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s19, s45, s58
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -5587,8 +5443,6 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v19, s19, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -6213,7 +6067,7 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s9, v1
; SI-NEXT: s_cmp_lg_u32 s8, 0
; SI-NEXT: v_readfirstlane_b32 s8, v0
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s72, s5, 16
; SI-NEXT: s_lshr_b32 s73, s7, 16
@@ -6359,28 +6213,6 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v18, s4
; SI-NEXT: v_mov_b32_e32 v19, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_v20i32_to_v40f16_scalar:
; VI: ; %bb.0:
@@ -6393,7 +6225,7 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s10, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s11, v0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s12, s6, 16
; VI-NEXT: s_lshr_b32 s13, s7, 16
@@ -6539,28 +6371,6 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v18, s7
; VI-NEXT: v_mov_b32_e32 v19, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v20i32_to_v40f16_scalar:
; GFX9: ; %bb.0:
@@ -6573,7 +6383,7 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s12, s6, 16
; GFX9-NEXT: s_lshr_b32 s13, s7, 16
@@ -6679,28 +6489,6 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v18, s7
; GFX9-NEXT: v_mov_b32_e32 v19, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v20i32_to_v40f16_scalar:
; GFX11: ; %bb.0:
@@ -6710,7 +6498,7 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s5, v0
; GFX11-NEXT: s_mov_b32 s58, 0
; GFX11-NEXT: s_cmp_lg_u32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s6, s4, 16
; GFX11-NEXT: s_lshr_b32 s7, s5, 16
@@ -6808,28 +6596,6 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v16, s9 :: v_dual_mov_b32 v17, s8
; GFX11-NEXT: v_dual_mov_b32 v18, s5 :: v_dual_mov_b32 v19, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7832,7 +7598,7 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; SI-NEXT: v_writelane_b32 v32, s66, 14
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v32, s67, 15
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s95, 16
@@ -7894,7 +7660,7 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s55, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s95
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -8056,11 +7822,8 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v18, v19, v18
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v20
; SI-NEXT: v_or_b32_e32 v19, v21, v19
-; SI-NEXT: s_branch .LBB19_5
+; SI-NEXT: s_branch .LBB19_4
; SI-NEXT: .LBB19_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -8093,7 +7856,7 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB19_5: ; %end
+; SI-NEXT: .LBB19_4: ; %end
; SI-NEXT: v_readlane_b32 s67, v32, 15
; SI-NEXT: v_readlane_b32 s66, v32, 14
; SI-NEXT: v_readlane_b32 s65, v32, 13
@@ -8170,7 +7933,7 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; VI-NEXT: v_writelane_b32 v32, s66, 18
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_writelane_b32 v32, s67, 19
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s35, 16
@@ -8232,7 +7995,7 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s55, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB19_4
+; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v19, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s35
@@ -8315,11 +8078,8 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; VI-NEXT: v_add_f16_sdwa v20, v20, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v19, s6, v19
; VI-NEXT: v_or_b32_e32 v19, v19, v20
-; VI-NEXT: s_branch .LBB19_5
+; VI-NEXT: s_branch .LBB19_4
; VI-NEXT: .LBB19_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB19_2
-; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -8352,7 +8112,7 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB19_5: ; %end
+; VI-NEXT: .LBB19_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v32, 19
; VI-NEXT: v_readlane_b32 s66, v32, 18
; VI-NEXT: v_readlane_b32 s65, v32, 17
@@ -8445,9 +8205,9 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s53, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s54, s58, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s55, s56, s57
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v19, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v19 op_sel_hi:[1,0]
@@ -8470,10 +8230,8 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v17, s53, v19 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v18, s54, v19 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v19, s55, v19 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB19_5
+; GFX9-NEXT: s_branch .LBB19_4
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -8506,7 +8264,7 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB19_5: ; %end
+; GFX9-NEXT: .LBB19_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -8573,10 +8331,10 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
; GFX11-NEXT: s_pack_ll_b32_b16 s18, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s19, s45, s58
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -8600,8 +8358,6 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v19, 0x200, s19 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -8818,9 +8574,9 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB21_3
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_4
+; SI-NEXT: s_cbranch_execnz .LBB21_3
; SI-NEXT: .LBB21_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v19, s55, 1.0
; SI-NEXT: v_add_f32_e64 v18, s54, 1.0
@@ -8842,10 +8598,8 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB21_5
+; SI-NEXT: s_branch .LBB21_4
; SI-NEXT: .LBB21_3:
-; SI-NEXT: s_branch .LBB21_2
-; SI-NEXT: .LBB21_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -8878,7 +8632,7 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB21_5: ; %end
+; SI-NEXT: .LBB21_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -8937,9 +8691,9 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB21_3
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_4
+; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v19, s55, 1.0
; VI-NEXT: v_add_f32_e64 v18, s54, 1.0
@@ -8961,10 +8715,8 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB21_5
+; VI-NEXT: s_branch .LBB21_4
; VI-NEXT: .LBB21_3:
-; VI-NEXT: s_branch .LBB21_2
-; VI-NEXT: .LBB21_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -8997,7 +8749,7 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB21_5: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -9056,9 +8808,9 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
+; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v19, s55, 1.0
; GFX9-NEXT: v_add_f32_e64 v18, s54, 1.0
@@ -9080,10 +8832,8 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB21_5
+; GFX9-NEXT: s_branch .LBB21_4
; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
-; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -9116,7 +8866,7 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB21_5: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -9176,10 +8926,10 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
; GFX11-NEXT: .LBB21_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v19, s55, 1.0
; GFX11-NEXT: v_add_f32_e64 v18, s54, 1.0
@@ -9201,10 +8951,8 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB21_5
+; GFX11-NEXT: s_branch .LBB21_4
; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
-; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -9221,7 +8969,7 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB21_5: ; %end
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -9423,7 +9171,7 @@ define inreg <20 x float> @bitcast_v10i64_to_v20f32_scalar(<10 x i64> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s10, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s11, v0
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB23_3
; SI-NEXT: .LBB23_2: ; %cmp.true
@@ -9469,8 +9217,6 @@ define inreg <20 x float> @bitcast_v10i64_to_v20f32_scalar(<10 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v18, s7
; SI-NEXT: v_mov_b32_e32 v19, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v10i64_to_v20f32_scalar:
; VI: ; %bb.0:
@@ -9483,7 +9229,7 @@ define inreg <20 x float> @bitcast_v10i64_to_v20f32_scalar(<10 x i64> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s10, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s11, v0
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
@@ -9529,8 +9275,6 @@ define inreg <20 x float> @bitcast_v10i64_to_v20f32_scalar(<10 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v18, s7
; VI-NEXT: v_mov_b32_e32 v19, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v10i64_to_v20f32_scalar:
; GFX9: ; %bb.0:
@@ -9543,7 +9287,7 @@ define inreg <20 x float> @bitcast_v10i64_to_v20f32_scalar(<10 x i64> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
@@ -9589,8 +9333,6 @@ define inreg <20 x float> @bitcast_v10i64_to_v20f32_scalar(<10 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v18, s7
; GFX9-NEXT: v_mov_b32_e32 v19, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v10i64_to_v20f32_scalar:
; GFX11: ; %bb.0:
@@ -9600,7 +9342,7 @@ define inreg <20 x float> @bitcast_v10i64_to_v20f32_scalar(<10 x i64> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s5, v0
; GFX11-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
@@ -9638,8 +9380,6 @@ define inreg <20 x float> @bitcast_v10i64_to_v20f32_scalar(<10 x i64> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: v_dual_mov_b32 v18, s5 :: v_dual_mov_b32 v19, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: s_branch .LBB23_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9839,9 +9579,9 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB25_3
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_4
+; SI-NEXT: s_cbranch_execnz .LBB25_3
; SI-NEXT: .LBB25_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v19, s55, 1.0
; SI-NEXT: v_add_f32_e64 v18, s54, 1.0
@@ -9863,10 +9603,8 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB25_5
+; SI-NEXT: s_branch .LBB25_4
; SI-NEXT: .LBB25_3:
-; SI-NEXT: s_branch .LBB25_2
-; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -9899,7 +9637,7 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB25_5: ; %end
+; SI-NEXT: .LBB25_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -9958,9 +9696,9 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
+; VI-NEXT: s_cbranch_execnz .LBB25_3
; VI-NEXT: .LBB25_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v19, s55, 1.0
; VI-NEXT: v_add_f32_e64 v18, s54, 1.0
@@ -9982,10 +9720,8 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB25_5
+; VI-NEXT: s_branch .LBB25_4
; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
-; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -10018,7 +9754,7 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB25_5: ; %end
+; VI-NEXT: .LBB25_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -10077,9 +9813,9 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
+; GFX9-NEXT: s_cbranch_execnz .LBB25_3
; GFX9-NEXT: .LBB25_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v19, s55, 1.0
; GFX9-NEXT: v_add_f32_e64 v18, s54, 1.0
@@ -10101,10 +9837,8 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB25_5
+; GFX9-NEXT: s_branch .LBB25_4
; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
-; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -10137,7 +9871,7 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB25_5: ; %end
+; GFX9-NEXT: .LBB25_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -10197,10 +9931,10 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_3
; GFX11-NEXT: .LBB25_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v19, s55, 1.0
; GFX11-NEXT: v_add_f32_e64 v18, s54, 1.0
@@ -10222,10 +9956,8 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB25_5
+; GFX11-NEXT: s_branch .LBB25_4
; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
-; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -10242,7 +9974,7 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB25_5: ; %end
+; GFX11-NEXT: .LBB25_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -10429,9 +10161,9 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB27_3
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_4
+; SI-NEXT: s_cbranch_execnz .LBB27_3
; SI-NEXT: .LBB27_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
; SI-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
@@ -10443,10 +10175,8 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; SI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; SI-NEXT: s_branch .LBB27_5
+; SI-NEXT: s_branch .LBB27_4
; SI-NEXT: .LBB27_3:
-; SI-NEXT: s_branch .LBB27_2
-; SI-NEXT: .LBB27_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -10479,7 +10209,7 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB27_5: ; %end
+; SI-NEXT: .LBB27_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -10538,9 +10268,9 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB27_3
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_4
+; VI-NEXT: s_cbranch_execnz .LBB27_3
; VI-NEXT: .LBB27_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
; VI-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
@@ -10552,10 +10282,8 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; VI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB27_5
+; VI-NEXT: s_branch .LBB27_4
; VI-NEXT: .LBB27_3:
-; VI-NEXT: s_branch .LBB27_2
-; VI-NEXT: .LBB27_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -10588,7 +10316,7 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB27_5: ; %end
+; VI-NEXT: .LBB27_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -10647,9 +10375,9 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_4
+; GFX9-NEXT: s_cbranch_execnz .LBB27_3
; GFX9-NEXT: .LBB27_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
; GFX9-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
@@ -10661,10 +10389,8 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; GFX9-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB27_5
+; GFX9-NEXT: s_branch .LBB27_4
; GFX9-NEXT: .LBB27_3:
-; GFX9-NEXT: s_branch .LBB27_2
-; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -10697,7 +10423,7 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB27_5: ; %end
+; GFX9-NEXT: .LBB27_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -10757,10 +10483,10 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_3
; GFX11-NEXT: .LBB27_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
; GFX11-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
@@ -10772,10 +10498,8 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; GFX11-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB27_5
+; GFX11-NEXT: s_branch .LBB27_4
; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
-; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -10792,7 +10516,7 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB27_5: ; %end
+; GFX11-NEXT: .LBB27_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -11397,7 +11121,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s9, v1
; SI-NEXT: s_cmp_lg_u32 s8, 0
; SI-NEXT: v_readfirstlane_b32 s8, v0
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s89, s5, 16
; SI-NEXT: s_lshr_b32 s88, s7, 16
@@ -11419,7 +11143,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[56:57], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[58:59], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB29_4
+; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v19, s5, 1.0
; SI-NEXT: v_add_f32_e64 v18, s4, 1.0
@@ -11461,30 +11185,8 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; SI-NEXT: s_branch .LBB29_5
+; SI-NEXT: s_branch .LBB29_4
; SI-NEXT: .LBB29_3:
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: s_branch .LBB29_2
-; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -11525,7 +11227,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v27, s56
; SI-NEXT: v_mov_b32_e32 v28, s58
; SI-NEXT: v_mov_b32_e32 v29, s60
-; SI-NEXT: .LBB29_5: ; %end
+; SI-NEXT: .LBB29_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28
@@ -11599,7 +11301,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s10, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s11, v0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s12, s6, 16
; VI-NEXT: s_lshr_b32 s13, s7, 16
@@ -11621,7 +11323,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; VI-NEXT: s_lshr_b32 s61, s18, 16
; VI-NEXT: s_lshr_b32 s62, s17, 16
; VI-NEXT: s_lshr_b32 s63, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB29_4
+; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v19, s6, 1.0
; VI-NEXT: v_add_f32_e64 v18, s7, 1.0
@@ -11663,30 +11365,8 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; VI-NEXT: s_branch .LBB29_5
+; VI-NEXT: s_branch .LBB29_4
; VI-NEXT: .LBB29_3:
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: s_branch .LBB29_2
-; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -11727,7 +11407,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v22, s14
; VI-NEXT: v_mov_b32_e32 v21, s13
; VI-NEXT: v_mov_b32_e32 v20, s12
-; VI-NEXT: .LBB29_5: ; %end
+; VI-NEXT: .LBB29_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v39, 16, v39
; VI-NEXT: v_lshlrev_b32_e32 v38, 16, v38
; VI-NEXT: v_lshlrev_b32_e32 v37, 16, v37
@@ -11781,7 +11461,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s12, s6, 16
; GFX9-NEXT: s_lshr_b32 s13, s7, 16
@@ -11803,7 +11483,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX9-NEXT: s_lshr_b32 s61, s18, 16
; GFX9-NEXT: s_lshr_b32 s62, s17, 16
; GFX9-NEXT: s_lshr_b32 s63, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
+; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v19, s6, 1.0
; GFX9-NEXT: v_add_f32_e64 v18, s7, 1.0
@@ -11845,30 +11525,8 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX9-NEXT: s_branch .LBB29_5
+; GFX9-NEXT: s_branch .LBB29_4
; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: s_branch .LBB29_2
-; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -11909,7 +11567,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v22, s14
; GFX9-NEXT: v_mov_b32_e32 v21, s13
; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: .LBB29_5: ; %end
+; GFX9-NEXT: .LBB29_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -11960,7 +11618,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s6, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s4, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s5, 16
@@ -11983,7 +11641,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s57, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s58, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f32_e64 v19, s4, 1.0
; GFX11-TRUE16-NEXT: v_add_f32_e64 v18, s5, 1.0
@@ -12025,30 +11683,8 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB29_5
+; GFX11-TRUE16-NEXT: s_branch .LBB29_4
; GFX11-TRUE16-NEXT: .LBB29_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
-; GFX11-TRUE16-NEXT: s_branch .LBB29_2
-; GFX11-TRUE16-NEXT: .LBB29_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -12069,7 +11705,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
-; GFX11-TRUE16-NEXT: .LBB29_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB29_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
@@ -12111,7 +11747,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s6, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s4, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s5, 16
@@ -12134,7 +11770,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s57, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s58, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f32_e64 v15, s4, 1.0
; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, s5, 1.0
@@ -12176,30 +11812,8 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v3
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-FAKE16-NEXT: s_branch .LBB29_5
+; GFX11-FAKE16-NEXT: s_branch .LBB29_4
; GFX11-FAKE16-NEXT: .LBB29_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
-; GFX11-FAKE16-NEXT: s_branch .LBB29_2
-; GFX11-FAKE16-NEXT: .LBB29_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v3, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v1, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s16 :: v_dual_mov_b32 v9, s17
@@ -12220,7 +11834,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
-; GFX11-FAKE16-NEXT: .LBB29_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB29_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v1
@@ -13179,7 +12793,7 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; SI-NEXT: v_writelane_b32 v20, s66, 14
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v20, s67, 15
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s95, 16
@@ -13385,9 +12999,6 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v40i16_to_v20f32_scalar:
; VI: ; %bb.0:
@@ -13443,7 +13054,7 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; VI-NEXT: v_writelane_b32 v20, s66, 18
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_writelane_b32 v20, s67, 19
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s35, 16
@@ -13653,9 +13264,6 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v40i16_to_v20f32_scalar:
; GFX9: ; %bb.0:
@@ -13723,9 +13331,9 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s53, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s54, s58, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s55, s56, s57
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
+; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -13747,10 +13355,8 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v17, s53, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v18, s54, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v19, s55, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB31_5
+; GFX9-NEXT: s_branch .LBB31_4
; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
-; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -13783,7 +13389,7 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB31_5: ; %end
+; GFX9-NEXT: .LBB31_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -13850,10 +13456,10 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
; GFX11-NEXT: s_pack_ll_b32_b16 s18, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s19, s45, s58
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-NEXT: .LBB31_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -13877,8 +13483,6 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v19, s19, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
-; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -14483,7 +14087,7 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s9, v1
; SI-NEXT: s_cmp_lg_u32 s8, 0
; SI-NEXT: v_readfirstlane_b32 s8, v0
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s89, s5, 16
; SI-NEXT: s_lshr_b32 s88, s7, 16
@@ -14505,7 +14109,7 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[56:57], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[58:59], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB33_4
+; SI-NEXT: s_cbranch_execnz .LBB33_3
; SI-NEXT: .LBB33_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v19, s5, 1.0
; SI-NEXT: v_add_f32_e64 v18, s4, 1.0
@@ -14547,30 +14151,8 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; SI-NEXT: s_branch .LBB33_5
+; SI-NEXT: s_branch .LBB33_4
; SI-NEXT: .LBB33_3:
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: s_branch .LBB33_2
-; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -14611,7 +14193,7 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v27, s56
; SI-NEXT: v_mov_b32_e32 v28, s58
; SI-NEXT: v_mov_b32_e32 v29, s60
-; SI-NEXT: .LBB33_5: ; %end
+; SI-NEXT: .LBB33_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28
@@ -14685,7 +14267,7 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s10, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s11, v0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s12, s6, 16
; VI-NEXT: s_lshr_b32 s13, s7, 16
@@ -14707,7 +14289,7 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; VI-NEXT: s_lshr_b32 s61, s18, 16
; VI-NEXT: s_lshr_b32 s62, s17, 16
; VI-NEXT: s_lshr_b32 s63, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB33_4
+; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v19, s6, 1.0
; VI-NEXT: v_add_f32_e64 v18, s7, 1.0
@@ -14749,30 +14331,8 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; VI-NEXT: s_branch .LBB33_5
+; VI-NEXT: s_branch .LBB33_4
; VI-NEXT: .LBB33_3:
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: s_branch .LBB33_2
-; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -14813,7 +14373,7 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v22, s14
; VI-NEXT: v_mov_b32_e32 v21, s13
; VI-NEXT: v_mov_b32_e32 v20, s12
-; VI-NEXT: .LBB33_5: ; %end
+; VI-NEXT: .LBB33_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v39, 16, v39
; VI-NEXT: v_lshlrev_b32_e32 v38, 16, v38
; VI-NEXT: v_lshlrev_b32_e32 v37, 16, v37
@@ -14867,7 +14427,7 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s12, s6, 16
; GFX9-NEXT: s_lshr_b32 s13, s7, 16
@@ -14889,7 +14449,7 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX9-NEXT: s_lshr_b32 s61, s18, 16
; GFX9-NEXT: s_lshr_b32 s62, s17, 16
; GFX9-NEXT: s_lshr_b32 s63, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
+; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v19, s6, 1.0
; GFX9-NEXT: v_add_f32_e64 v18, s7, 1.0
@@ -14931,30 +14491,8 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX9-NEXT: s_branch .LBB33_5
+; GFX9-NEXT: s_branch .LBB33_4
; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: s_branch .LBB33_2
-; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -14995,7 +14533,7 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v22, s14
; GFX9-NEXT: v_mov_b32_e32 v21, s13
; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: .LBB33_5: ; %end
+; GFX9-NEXT: .LBB33_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -15046,7 +14584,7 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s6, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s4, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s5, 16
@@ -15069,7 +14607,7 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s57, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s58, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f32_e64 v19, s4, 1.0
; GFX11-TRUE16-NEXT: v_add_f32_e64 v18, s5, 1.0
@@ -15111,30 +14649,8 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB33_5
+; GFX11-TRUE16-NEXT: s_branch .LBB33_4
; GFX11-TRUE16-NEXT: .LBB33_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
-; GFX11-TRUE16-NEXT: s_branch .LBB33_2
-; GFX11-TRUE16-NEXT: .LBB33_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -15155,7 +14671,7 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
-; GFX11-TRUE16-NEXT: .LBB33_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB33_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
@@ -15197,7 +14713,7 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s6, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s4, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s5, 16
@@ -15220,7 +14736,7 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s57, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s58, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f32_e64 v15, s4, 1.0
; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, s5, 1.0
@@ -15262,30 +14778,8 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v3
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-FAKE16-NEXT: s_branch .LBB33_5
+; GFX11-FAKE16-NEXT: s_branch .LBB33_4
; GFX11-FAKE16-NEXT: .LBB33_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
-; GFX11-FAKE16-NEXT: s_branch .LBB33_2
-; GFX11-FAKE16-NEXT: .LBB33_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v3, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v1, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s16 :: v_dual_mov_b32 v9, s17
@@ -15306,7 +14800,7 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
-; GFX11-FAKE16-NEXT: .LBB33_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB33_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v1
@@ -16350,7 +15844,7 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; SI-NEXT: v_writelane_b32 v32, s66, 14
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v32, s67, 15
-; SI-NEXT: s_cbranch_scc0 .LBB35_3
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s95, 16
@@ -16412,7 +15906,7 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s55, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB35_4
+; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s95
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -16574,11 +16068,8 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v18, v19, v18
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v20
; SI-NEXT: v_or_b32_e32 v19, v21, v19
-; SI-NEXT: s_branch .LBB35_5
+; SI-NEXT: s_branch .LBB35_4
; SI-NEXT: .LBB35_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB35_2
-; SI-NEXT: .LBB35_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -16611,7 +16102,7 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB35_5: ; %end
+; SI-NEXT: .LBB35_4: ; %end
; SI-NEXT: v_readlane_b32 s67, v32, 15
; SI-NEXT: v_readlane_b32 s66, v32, 14
; SI-NEXT: v_readlane_b32 s65, v32, 13
@@ -16688,7 +16179,7 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; VI-NEXT: v_writelane_b32 v32, s66, 18
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_writelane_b32 v32, s67, 19
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s35, 16
@@ -16750,7 +16241,7 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s55, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB35_4
+; VI-NEXT: s_cbranch_execnz .LBB35_3
; VI-NEXT: .LBB35_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v19, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s35
@@ -16833,11 +16324,8 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; VI-NEXT: v_add_f16_sdwa v20, v20, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v19, s6, v19
; VI-NEXT: v_or_b32_e32 v19, v19, v20
-; VI-NEXT: s_branch .LBB35_5
+; VI-NEXT: s_branch .LBB35_4
; VI-NEXT: .LBB35_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB35_2
-; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -16870,7 +16358,7 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB35_5: ; %end
+; VI-NEXT: .LBB35_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v32, 19
; VI-NEXT: v_readlane_b32 s66, v32, 18
; VI-NEXT: v_readlane_b32 s65, v32, 17
@@ -16963,9 +16451,9 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s53, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s54, s58, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s55, s56, s57
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
+; GFX9-NEXT: s_cbranch_execnz .LBB35_3
; GFX9-NEXT: .LBB35_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v19, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v19 op_sel_hi:[1,0]
@@ -16988,10 +16476,8 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v17, s53, v19 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v18, s54, v19 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v19, s55, v19 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB35_5
+; GFX9-NEXT: s_branch .LBB35_4
; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
-; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -17024,7 +16510,7 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB35_5: ; %end
+; GFX9-NEXT: .LBB35_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -17091,10 +16577,10 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
; GFX11-NEXT: s_pack_ll_b32_b16 s18, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s19, s45, s58
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-NEXT: .LBB35_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -17118,8 +16604,6 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v19, 0x200, s19 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
-; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -17321,7 +16805,7 @@ define inreg <10 x double> @bitcast_v10i64_to_v10f64_scalar(<10 x i64> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s10, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s11, v0
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB37_3
; SI-NEXT: .LBB37_2: ; %cmp.true
@@ -17367,8 +16851,6 @@ define inreg <10 x double> @bitcast_v10i64_to_v10f64_scalar(<10 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v18, s7
; SI-NEXT: v_mov_b32_e32 v19, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v10i64_to_v10f64_scalar:
; VI: ; %bb.0:
@@ -17381,7 +16863,7 @@ define inreg <10 x double> @bitcast_v10i64_to_v10f64_scalar(<10 x i64> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s10, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s11, v0
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB37_3
; VI-NEXT: .LBB37_2: ; %cmp.true
@@ -17427,8 +16909,6 @@ define inreg <10 x double> @bitcast_v10i64_to_v10f64_scalar(<10 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v18, s7
; VI-NEXT: v_mov_b32_e32 v19, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v10i64_to_v10f64_scalar:
; GFX9: ; %bb.0:
@@ -17441,7 +16921,7 @@ define inreg <10 x double> @bitcast_v10i64_to_v10f64_scalar(<10 x i64> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB37_3
; GFX9-NEXT: .LBB37_2: ; %cmp.true
@@ -17487,8 +16967,6 @@ define inreg <10 x double> @bitcast_v10i64_to_v10f64_scalar(<10 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v18, s7
; GFX9-NEXT: v_mov_b32_e32 v19, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v10i64_to_v10f64_scalar:
; GFX11: ; %bb.0:
@@ -17498,7 +16976,7 @@ define inreg <10 x double> @bitcast_v10i64_to_v10f64_scalar(<10 x i64> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s5, v0
; GFX11-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
@@ -17535,8 +17013,6 @@ define inreg <10 x double> @bitcast_v10i64_to_v10f64_scalar(<10 x i64> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: v_dual_mov_b32 v18, s5 :: v_dual_mov_b32 v19, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_4:
-; GFX11-NEXT: s_branch .LBB37_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17706,9 +17182,9 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB39_3
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_4
+; SI-NEXT: s_cbranch_execnz .LBB39_3
; SI-NEXT: .LBB39_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -17720,10 +17196,8 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; SI-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
; SI-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; SI-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
-; SI-NEXT: s_branch .LBB39_5
+; SI-NEXT: s_branch .LBB39_4
; SI-NEXT: .LBB39_3:
-; SI-NEXT: s_branch .LBB39_2
-; SI-NEXT: .LBB39_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -17756,7 +17230,7 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB39_5: ; %end
+; SI-NEXT: .LBB39_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -17815,9 +17289,9 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB39_3
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_4
+; VI-NEXT: s_cbranch_execnz .LBB39_3
; VI-NEXT: .LBB39_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -17829,10 +17303,8 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; VI-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
; VI-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; VI-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
-; VI-NEXT: s_branch .LBB39_5
+; VI-NEXT: s_branch .LBB39_4
; VI-NEXT: .LBB39_3:
-; VI-NEXT: s_branch .LBB39_2
-; VI-NEXT: .LBB39_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -17865,7 +17337,7 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB39_5: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -17924,9 +17396,9 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
+; GFX9-NEXT: s_cbranch_execnz .LBB39_3
; GFX9-NEXT: .LBB39_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -17938,10 +17410,8 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
; GFX9-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; GFX9-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
-; GFX9-NEXT: s_branch .LBB39_5
+; GFX9-NEXT: s_branch .LBB39_4
; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
-; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -17974,7 +17444,7 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB39_5: ; %end
+; GFX9-NEXT: .LBB39_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -18034,10 +17504,10 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_3
; GFX11-NEXT: .LBB39_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -18049,10 +17519,8 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
; GFX11-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; GFX11-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
-; GFX11-NEXT: s_branch .LBB39_5
+; GFX11-NEXT: s_branch .LBB39_4
; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
-; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -18069,7 +17537,7 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB39_5: ; %end
+; GFX11-NEXT: .LBB39_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -18704,7 +18172,7 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s9, v1
; SI-NEXT: s_cmp_lg_u32 s8, 0
; SI-NEXT: v_readfirstlane_b32 s8, v0
-; SI-NEXT: s_cbranch_scc0 .LBB41_4
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s72, s5, 16
; SI-NEXT: s_lshr_b32 s73, s7, 16
@@ -18850,28 +18318,6 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v18, s4
; SI-NEXT: v_mov_b32_e32 v19, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB41_4:
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: s_branch .LBB41_2
;
; VI-LABEL: bitcast_v10i64_to_v40i16_scalar:
; VI: ; %bb.0:
@@ -18884,7 +18330,7 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s10, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s11, v0
-; VI-NEXT: s_cbranch_scc0 .LBB41_4
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s12, s6, 16
; VI-NEXT: s_lshr_b32 s13, s7, 16
@@ -19030,28 +18476,6 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v18, s7
; VI-NEXT: v_mov_b32_e32 v19, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_4:
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: s_branch .LBB41_2
;
; GFX9-LABEL: bitcast_v10i64_to_v40i16_scalar:
; GFX9: ; %bb.0:
@@ -19064,7 +18488,7 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s12, s6, 16
; GFX9-NEXT: s_lshr_b32 s13, s7, 16
@@ -19170,28 +18594,6 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v18, s7
; GFX9-NEXT: v_mov_b32_e32 v19, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_4:
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: s_branch .LBB41_2
;
; GFX11-LABEL: bitcast_v10i64_to_v40i16_scalar:
; GFX11: ; %bb.0:
@@ -19201,7 +18603,7 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s5, v0
; GFX11-NEXT: s_mov_b32 s58, 0
; GFX11-NEXT: s_cmp_lg_u32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s6, s4, 16
; GFX11-NEXT: s_lshr_b32 s7, s5, 16
@@ -19299,28 +18701,6 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v16, s9 :: v_dual_mov_b32 v17, s8
; GFX11-NEXT: v_dual_mov_b32 v18, s5 :: v_dual_mov_b32 v19, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_4:
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB41_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -20238,7 +19618,7 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; SI-NEXT: v_writelane_b32 v20, s66, 14
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v20, s67, 15
-; SI-NEXT: s_cbranch_scc0 .LBB43_4
+; SI-NEXT: s_cbranch_scc0 .LBB43_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s95, 16
@@ -20444,9 +19824,6 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB43_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB43_2
;
; VI-LABEL: bitcast_v40i16_to_v10i64_scalar:
; VI: ; %bb.0:
@@ -20502,7 +19879,7 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; VI-NEXT: v_writelane_b32 v20, s66, 18
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_writelane_b32 v20, s67, 19
-; VI-NEXT: s_cbranch_scc0 .LBB43_4
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s35, 16
@@ -20712,9 +20089,6 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB43_2
;
; GFX9-LABEL: bitcast_v40i16_to_v10i64_scalar:
; GFX9: ; %bb.0:
@@ -20782,9 +20156,9 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s53, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s54, s58, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s55, s56, s57
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
+; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -20806,10 +20180,8 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v17, s53, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v18, s54, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v19, s55, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB43_5
+; GFX9-NEXT: s_branch .LBB43_4
; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
-; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -20842,7 +20214,7 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB43_5: ; %end
+; GFX9-NEXT: .LBB43_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -20909,10 +20281,10 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
; GFX11-NEXT: s_pack_ll_b32_b16 s18, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s19, s45, s58
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-NEXT: .LBB43_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -20936,8 +20308,6 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v19, s19, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
-; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -21572,7 +20942,7 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s9, v1
; SI-NEXT: s_cmp_lg_u32 s8, 0
; SI-NEXT: v_readfirstlane_b32 s8, v0
-; SI-NEXT: s_cbranch_scc0 .LBB45_4
+; SI-NEXT: s_cbranch_scc0 .LBB45_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s72, s5, 16
; SI-NEXT: s_lshr_b32 s73, s7, 16
@@ -21718,28 +21088,6 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v18, s4
; SI-NEXT: v_mov_b32_e32 v19, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB45_4:
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: s_branch .LBB45_2
;
; VI-LABEL: bitcast_v10i64_to_v40f16_scalar:
; VI: ; %bb.0:
@@ -21752,7 +21100,7 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s10, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s11, v0
-; VI-NEXT: s_cbranch_scc0 .LBB45_4
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s12, s6, 16
; VI-NEXT: s_lshr_b32 s13, s7, 16
@@ -21898,28 +21246,6 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v18, s7
; VI-NEXT: v_mov_b32_e32 v19, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_4:
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: s_branch .LBB45_2
;
; GFX9-LABEL: bitcast_v10i64_to_v40f16_scalar:
; GFX9: ; %bb.0:
@@ -21932,7 +21258,7 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s12, s6, 16
; GFX9-NEXT: s_lshr_b32 s13, s7, 16
@@ -22038,28 +21364,6 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v18, s7
; GFX9-NEXT: v_mov_b32_e32 v19, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_4:
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: s_branch .LBB45_2
;
; GFX11-LABEL: bitcast_v10i64_to_v40f16_scalar:
; GFX11: ; %bb.0:
@@ -22069,7 +21373,7 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s5, v0
; GFX11-NEXT: s_mov_b32 s58, 0
; GFX11-NEXT: s_cmp_lg_u32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s6, s4, 16
; GFX11-NEXT: s_lshr_b32 s7, s5, 16
@@ -22167,28 +21471,6 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v16, s9 :: v_dual_mov_b32 v17, s8
; GFX11-NEXT: v_dual_mov_b32 v18, s5 :: v_dual_mov_b32 v19, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_4:
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB45_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -23191,7 +22473,7 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; SI-NEXT: v_writelane_b32 v32, s66, 14
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v32, s67, 15
-; SI-NEXT: s_cbranch_scc0 .LBB47_3
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s95, 16
@@ -23253,7 +22535,7 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s55, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB47_4
+; SI-NEXT: s_cbranch_execnz .LBB47_3
; SI-NEXT: .LBB47_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s95
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -23415,11 +22697,8 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v18, v19, v18
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v20
; SI-NEXT: v_or_b32_e32 v19, v21, v19
-; SI-NEXT: s_branch .LBB47_5
+; SI-NEXT: s_branch .LBB47_4
; SI-NEXT: .LBB47_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB47_2
-; SI-NEXT: .LBB47_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -23452,7 +22731,7 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB47_5: ; %end
+; SI-NEXT: .LBB47_4: ; %end
; SI-NEXT: v_readlane_b32 s67, v32, 15
; SI-NEXT: v_readlane_b32 s66, v32, 14
; SI-NEXT: v_readlane_b32 s65, v32, 13
@@ -23529,7 +22808,7 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; VI-NEXT: v_writelane_b32 v32, s66, 18
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_writelane_b32 v32, s67, 19
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s35, 16
@@ -23591,7 +22870,7 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s55, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB47_4
+; VI-NEXT: s_cbranch_execnz .LBB47_3
; VI-NEXT: .LBB47_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v19, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s35
@@ -23674,11 +22953,8 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; VI-NEXT: v_add_f16_sdwa v20, v20, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v19, s6, v19
; VI-NEXT: v_or_b32_e32 v19, v19, v20
-; VI-NEXT: s_branch .LBB47_5
+; VI-NEXT: s_branch .LBB47_4
; VI-NEXT: .LBB47_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB47_2
-; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -23711,7 +22987,7 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB47_5: ; %end
+; VI-NEXT: .LBB47_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v32, 19
; VI-NEXT: v_readlane_b32 s66, v32, 18
; VI-NEXT: v_readlane_b32 s65, v32, 17
@@ -23804,9 +23080,9 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s53, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s54, s58, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s55, s56, s57
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
+; GFX9-NEXT: s_cbranch_execnz .LBB47_3
; GFX9-NEXT: .LBB47_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v19, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v19 op_sel_hi:[1,0]
@@ -23829,10 +23105,8 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v17, s53, v19 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v18, s54, v19 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v19, s55, v19 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB47_5
+; GFX9-NEXT: s_branch .LBB47_4
; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
-; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -23865,7 +23139,7 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB47_5: ; %end
+; GFX9-NEXT: .LBB47_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -23932,10 +23206,10 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
; GFX11-NEXT: s_pack_ll_b32_b16 s18, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s19, s45, s58
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-NEXT: .LBB47_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -23959,8 +23233,6 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v19, 0x200, s19 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -24535,7 +23807,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s5, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v0
-; SI-NEXT: s_cbranch_scc0 .LBB49_3
+; SI-NEXT: s_cbranch_scc0 .LBB49_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s89, s9, 16
; SI-NEXT: s_lshr_b32 s88, s7, 16
@@ -24557,7 +23829,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; SI-NEXT: s_lshr_b64 s[56:57], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[58:59], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB49_4
+; SI-NEXT: s_cbranch_execnz .LBB49_3
; SI-NEXT: .LBB49_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[18:19], s[8:9], 1.0
; SI-NEXT: v_add_f64 v[16:17], s[6:7], 1.0
@@ -24589,30 +23861,8 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; SI-NEXT: s_branch .LBB49_5
+; SI-NEXT: s_branch .LBB49_4
; SI-NEXT: .LBB49_3:
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: s_branch .LBB49_2
-; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -24653,7 +23903,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v22, s14
; SI-NEXT: v_mov_b32_e32 v21, s12
; SI-NEXT: v_mov_b32_e32 v20, s10
-; SI-NEXT: .LBB49_5: ; %end
+; SI-NEXT: .LBB49_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28
@@ -24727,7 +23977,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s5, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s4, v0
-; VI-NEXT: s_cbranch_scc0 .LBB49_3
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s12, s9, 16
; VI-NEXT: s_lshr_b32 s46, s8, 16
@@ -24749,7 +23999,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; VI-NEXT: s_lshr_b32 s62, s18, 16
; VI-NEXT: s_lshr_b32 s45, s17, 16
; VI-NEXT: s_lshr_b32 s63, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB49_4
+; VI-NEXT: s_cbranch_execnz .LBB49_3
; VI-NEXT: .LBB49_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[18:19], s[8:9], 1.0
; VI-NEXT: v_add_f64 v[16:17], s[6:7], 1.0
@@ -24781,30 +24031,8 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; VI-NEXT: s_branch .LBB49_5
+; VI-NEXT: s_branch .LBB49_4
; VI-NEXT: .LBB49_3:
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: s_branch .LBB49_2
-; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -24845,7 +24073,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v26, s14
; VI-NEXT: v_mov_b32_e32 v24, s13
; VI-NEXT: v_mov_b32_e32 v22, s12
-; VI-NEXT: .LBB49_5: ; %end
+; VI-NEXT: .LBB49_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v37, 16, v37
; VI-NEXT: v_lshlrev_b32_e32 v35, 16, v35
; VI-NEXT: v_lshlrev_b32_e32 v33, 16, v33
@@ -24899,7 +24127,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s5, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s12, s9, 16
; GFX9-NEXT: s_lshr_b32 s46, s8, 16
@@ -24921,7 +24149,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX9-NEXT: s_lshr_b32 s62, s18, 16
; GFX9-NEXT: s_lshr_b32 s45, s17, 16
; GFX9-NEXT: s_lshr_b32 s63, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
+; GFX9-NEXT: s_cbranch_execnz .LBB49_3
; GFX9-NEXT: .LBB49_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[18:19], s[8:9], 1.0
; GFX9-NEXT: v_add_f64 v[16:17], s[6:7], 1.0
@@ -24953,30 +24181,8 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; GFX9-NEXT: s_branch .LBB49_5
+; GFX9-NEXT: s_branch .LBB49_4
; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: s_branch .LBB49_2
-; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -25017,7 +24223,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v26, s14
; GFX9-NEXT: v_mov_b32_e32 v24, s13
; GFX9-NEXT: v_mov_b32_e32 v22, s12
-; GFX9-NEXT: .LBB49_5: ; %end
+; GFX9-NEXT: .LBB49_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -25068,7 +24274,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s6, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s5, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s4, 16
@@ -25091,7 +24297,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s58, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], s[4:5], 1.0
; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], s[28:29], 1.0
@@ -25123,30 +24329,8 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB49_5
+; GFX11-TRUE16-NEXT: s_branch .LBB49_4
; GFX11-TRUE16-NEXT: .LBB49_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
-; GFX11-TRUE16-NEXT: s_branch .LBB49_2
-; GFX11-TRUE16-NEXT: .LBB49_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -25167,7 +24351,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s43 :: v_dual_mov_b32 v25, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s42 :: v_dual_mov_b32 v23, s8
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s41 :: v_dual_mov_b32 v21, s7
-; GFX11-TRUE16-NEXT: .LBB49_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB49_4: ; %end
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, v34 :: v_dual_mov_b32 v33, v33
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v24, v24
@@ -25212,7 +24396,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s6, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s5, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s4, 16
@@ -25235,7 +24419,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s58, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f64 v[15:16], s[4:5], 1.0
; GFX11-FAKE16-NEXT: v_add_f64 v[17:18], s[28:29], 1.0
@@ -25267,30 +24451,8 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v0
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v3
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
-; GFX11-FAKE16-NEXT: s_branch .LBB49_5
+; GFX11-FAKE16-NEXT: s_branch .LBB49_4
; GFX11-FAKE16-NEXT: .LBB49_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
-; GFX11-FAKE16-NEXT: s_branch .LBB49_2
-; GFX11-FAKE16-NEXT: .LBB49_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v21, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v7, s18
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s20 :: v_dual_mov_b32 v12, s22
@@ -25311,7 +24473,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s13 :: v_dual_mov_b32 v31, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s10 :: v_dual_mov_b32 v27, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s8 :: v_dual_mov_b32 v25, s7
-; GFX11-FAKE16-NEXT: .LBB49_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB49_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v0
@@ -26270,7 +25432,7 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; SI-NEXT: v_writelane_b32 v20, s66, 14
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v20, s67, 15
-; SI-NEXT: s_cbranch_scc0 .LBB51_4
+; SI-NEXT: s_cbranch_scc0 .LBB51_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s95, 16
@@ -26476,9 +25638,6 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB51_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB51_2
;
; VI-LABEL: bitcast_v40i16_to_v10f64_scalar:
; VI: ; %bb.0:
@@ -26534,7 +25693,7 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; VI-NEXT: v_writelane_b32 v20, s66, 18
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_writelane_b32 v20, s67, 19
-; VI-NEXT: s_cbranch_scc0 .LBB51_4
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s35, 16
@@ -26744,9 +25903,6 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB51_2
;
; GFX9-LABEL: bitcast_v40i16_to_v10f64_scalar:
; GFX9: ; %bb.0:
@@ -26814,9 +25970,9 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s53, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s54, s58, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s55, s56, s57
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_4
+; GFX9-NEXT: s_cbranch_execnz .LBB51_3
; GFX9-NEXT: .LBB51_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -26838,10 +25994,8 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v17, s53, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v18, s54, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v19, s55, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB51_5
+; GFX9-NEXT: s_branch .LBB51_4
; GFX9-NEXT: .LBB51_3:
-; GFX9-NEXT: s_branch .LBB51_2
-; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -26874,7 +26028,7 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB51_5: ; %end
+; GFX9-NEXT: .LBB51_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -26941,10 +26095,10 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
; GFX11-NEXT: s_pack_ll_b32_b16 s18, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s19, s45, s58
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-NEXT: .LBB51_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -26968,8 +26122,6 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v19, s19, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
-; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -27544,7 +26696,7 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; SI-NEXT: v_readfirstlane_b32 s5, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v0
-; SI-NEXT: s_cbranch_scc0 .LBB53_3
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s89, s9, 16
; SI-NEXT: s_lshr_b32 s88, s7, 16
@@ -27566,7 +26718,7 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; SI-NEXT: s_lshr_b64 s[56:57], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[58:59], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB53_4
+; SI-NEXT: s_cbranch_execnz .LBB53_3
; SI-NEXT: .LBB53_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[18:19], s[8:9], 1.0
; SI-NEXT: v_add_f64 v[16:17], s[6:7], 1.0
@@ -27598,30 +26750,8 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; SI-NEXT: s_branch .LBB53_5
+; SI-NEXT: s_branch .LBB53_4
; SI-NEXT: .LBB53_3:
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr73
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr75
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: s_branch .LBB53_2
-; SI-NEXT: .LBB53_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -27662,7 +26792,7 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; SI-NEXT: v_mov_b32_e32 v22, s14
; SI-NEXT: v_mov_b32_e32 v21, s12
; SI-NEXT: v_mov_b32_e32 v20, s10
-; SI-NEXT: .LBB53_5: ; %end
+; SI-NEXT: .LBB53_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28
@@ -27736,7 +26866,7 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; VI-NEXT: v_readfirstlane_b32 s5, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s4, v0
-; VI-NEXT: s_cbranch_scc0 .LBB53_3
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s12, s9, 16
; VI-NEXT: s_lshr_b32 s46, s8, 16
@@ -27758,7 +26888,7 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; VI-NEXT: s_lshr_b32 s62, s18, 16
; VI-NEXT: s_lshr_b32 s45, s17, 16
; VI-NEXT: s_lshr_b32 s63, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB53_4
+; VI-NEXT: s_cbranch_execnz .LBB53_3
; VI-NEXT: .LBB53_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[18:19], s[8:9], 1.0
; VI-NEXT: v_add_f64 v[16:17], s[6:7], 1.0
@@ -27790,30 +26920,8 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; VI-NEXT: s_branch .LBB53_5
+; VI-NEXT: s_branch .LBB53_4
; VI-NEXT: .LBB53_3:
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: s_branch .LBB53_2
-; VI-NEXT: .LBB53_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -27854,7 +26962,7 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; VI-NEXT: v_mov_b32_e32 v26, s14
; VI-NEXT: v_mov_b32_e32 v24, s13
; VI-NEXT: v_mov_b32_e32 v22, s12
-; VI-NEXT: .LBB53_5: ; %end
+; VI-NEXT: .LBB53_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v37, 16, v37
; VI-NEXT: v_lshlrev_b32_e32 v35, 16, v35
; VI-NEXT: v_lshlrev_b32_e32 v33, 16, v33
@@ -27908,7 +27016,7 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX9-NEXT: v_readfirstlane_b32 s5, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s12, s9, 16
; GFX9-NEXT: s_lshr_b32 s46, s8, 16
@@ -27930,7 +27038,7 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX9-NEXT: s_lshr_b32 s62, s18, 16
; GFX9-NEXT: s_lshr_b32 s45, s17, 16
; GFX9-NEXT: s_lshr_b32 s63, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB53_4
+; GFX9-NEXT: s_cbranch_execnz .LBB53_3
; GFX9-NEXT: .LBB53_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[18:19], s[8:9], 1.0
; GFX9-NEXT: v_add_f64 v[16:17], s[6:7], 1.0
@@ -27962,30 +27070,8 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; GFX9-NEXT: s_branch .LBB53_5
+; GFX9-NEXT: s_branch .LBB53_4
; GFX9-NEXT: .LBB53_3:
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: s_branch .LBB53_2
-; GFX9-NEXT: .LBB53_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -28026,7 +27112,7 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v26, s14
; GFX9-NEXT: v_mov_b32_e32 v24, s13
; GFX9-NEXT: v_mov_b32_e32 v22, s12
-; GFX9-NEXT: .LBB53_5: ; %end
+; GFX9-NEXT: .LBB53_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -28077,7 +27163,7 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s6, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s5, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s4, 16
@@ -28100,7 +27186,7 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s58, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], s[4:5], 1.0
; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], s[28:29], 1.0
@@ -28132,30 +27218,8 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB53_5
+; GFX11-TRUE16-NEXT: s_branch .LBB53_4
; GFX11-TRUE16-NEXT: .LBB53_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
-; GFX11-TRUE16-NEXT: s_branch .LBB53_2
-; GFX11-TRUE16-NEXT: .LBB53_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -28176,7 +27240,7 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s43 :: v_dual_mov_b32 v25, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s42 :: v_dual_mov_b32 v23, s8
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s41 :: v_dual_mov_b32 v21, s7
-; GFX11-TRUE16-NEXT: .LBB53_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB53_4: ; %end
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, v34 :: v_dual_mov_b32 v33, v33
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v24, v24
@@ -28221,7 +27285,7 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s6, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s5, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s4, 16
@@ -28244,7 +27308,7 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s58, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f64 v[15:16], s[4:5], 1.0
; GFX11-FAKE16-NEXT: v_add_f64 v[17:18], s[28:29], 1.0
@@ -28276,30 +27340,8 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v0
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v3
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
-; GFX11-FAKE16-NEXT: s_branch .LBB53_5
+; GFX11-FAKE16-NEXT: s_branch .LBB53_4
; GFX11-FAKE16-NEXT: .LBB53_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
-; GFX11-FAKE16-NEXT: s_branch .LBB53_2
-; GFX11-FAKE16-NEXT: .LBB53_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v21, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v7, s18
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s20 :: v_dual_mov_b32 v12, s22
@@ -28320,7 +27362,7 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s13 :: v_dual_mov_b32 v31, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s10 :: v_dual_mov_b32 v27, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s8 :: v_dual_mov_b32 v25, s7
-; GFX11-FAKE16-NEXT: .LBB53_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB53_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v0
@@ -29364,7 +28406,7 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; SI-NEXT: v_writelane_b32 v32, s66, 14
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v32, s67, 15
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s95, 16
@@ -29426,7 +28468,7 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s55, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB55_4
+; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s95
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -29588,11 +28630,8 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; SI-NEXT: v_or_b32_e32 v18, v19, v18
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v20
; SI-NEXT: v_or_b32_e32 v19, v21, v19
-; SI-NEXT: s_branch .LBB55_5
+; SI-NEXT: s_branch .LBB55_4
; SI-NEXT: .LBB55_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB55_2
-; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -29625,7 +28664,7 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB55_5: ; %end
+; SI-NEXT: .LBB55_4: ; %end
; SI-NEXT: v_readlane_b32 s67, v32, 15
; SI-NEXT: v_readlane_b32 s66, v32, 14
; SI-NEXT: v_readlane_b32 s65, v32, 13
@@ -29702,7 +28741,7 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; VI-NEXT: v_writelane_b32 v32, s66, 18
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_writelane_b32 v32, s67, 19
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s35, 16
@@ -29764,7 +28803,7 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s55, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB55_4
+; VI-NEXT: s_cbranch_execnz .LBB55_3
; VI-NEXT: .LBB55_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v19, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s35
@@ -29847,11 +28886,8 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; VI-NEXT: v_add_f16_sdwa v20, v20, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v19, s6, v19
; VI-NEXT: v_or_b32_e32 v19, v19, v20
-; VI-NEXT: s_branch .LBB55_5
+; VI-NEXT: s_branch .LBB55_4
; VI-NEXT: .LBB55_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB55_2
-; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -29884,7 +28920,7 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB55_5: ; %end
+; VI-NEXT: .LBB55_4: ; %end
; VI-NEXT: v_readlane_b32 s67, v32, 19
; VI-NEXT: v_readlane_b32 s66, v32, 18
; VI-NEXT: v_readlane_b32 s65, v32, 17
@@ -29977,9 +29013,9 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX9-NEXT: s_pack_ll_b32_b16 s53, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s54, s58, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s55, s56, s57
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
+; GFX9-NEXT: s_cbranch_execnz .LBB55_3
; GFX9-NEXT: .LBB55_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v19, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v19 op_sel_hi:[1,0]
@@ -30002,10 +29038,8 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX9-NEXT: v_pk_add_f16 v17, s53, v19 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v18, s54, v19 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v19, s55, v19 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB55_5
+; GFX9-NEXT: s_branch .LBB55_4
; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
-; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -30038,7 +29072,7 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB55_5: ; %end
+; GFX9-NEXT: .LBB55_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -30105,10 +29139,10 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
; GFX11-NEXT: s_pack_ll_b32_b16 s18, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s19, s45, s58
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-NEXT: .LBB55_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -30132,8 +29166,6 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-NEXT: v_pk_add_f16 v19, 0x200, s19 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
-; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -31096,7 +30128,7 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; SI-NEXT: s_lshr_b32 s64, s65, 16
; SI-NEXT: v_readfirstlane_b32 s4, v6
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB57_4
+; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s5, s17, 0xffff
; SI-NEXT: s_lshl_b32 s7, s94, 16
@@ -31410,28 +30442,6 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB57_4:
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: s_branch .LBB57_2
;
; VI-LABEL: bitcast_v40i16_to_v40f16_scalar:
; VI: ; %bb.0:
@@ -31464,7 +30474,7 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; VI-NEXT: s_lshr_b32 s47, s46, 16
; VI-NEXT: v_readfirstlane_b32 s4, v6
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB57_3
; VI-NEXT: .LBB57_2: ; %cmp.true
@@ -31590,8 +30600,6 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v18, s9
; VI-NEXT: v_mov_b32_e32 v19, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v40i16_to_v40f16_scalar:
; GFX9: ; %bb.0:
@@ -31624,9 +30632,9 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s44, s58, 16
; GFX9-NEXT: v_readfirstlane_b32 s4, v6
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
+; GFX9-NEXT: s_cbranch_execnz .LBB57_3
; GFX9-NEXT: .LBB57_2: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s63, s57
; GFX9-NEXT: v_pk_add_u16 v19, s4, 3 op_sel_hi:[1,0]
@@ -31688,10 +30696,8 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v22, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v21, 16, v18
; GFX9-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX9-NEXT: s_branch .LBB57_5
+; GFX9-NEXT: s_branch .LBB57_4
; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
-; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v19, s63
; GFX9-NEXT: v_mov_b32_e32 v18, s62
; GFX9-NEXT: v_mov_b32_e32 v17, s61
@@ -31732,7 +30738,7 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v37, s8
; GFX9-NEXT: v_mov_b32_e32 v38, s7
; GFX9-NEXT: v_mov_b32_e32 v39, s6
-; GFX9-NEXT: .LBB57_5: ; %end
+; GFX9-NEXT: .LBB57_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -31803,10 +30809,10 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s56, 16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s58, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s58, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s58
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s47, s57, s47
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s46, s56, s46
@@ -31868,10 +30874,8 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX11-TRUE16-NEXT: s_branch .LBB57_5
+; GFX11-TRUE16-NEXT: s_branch .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB57_2
-; GFX11-TRUE16-NEXT: .LBB57_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, s57 :: v_dual_mov_b32 v18, s56
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
@@ -31892,7 +30896,7 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s9 :: v_dual_mov_b32 v35, s8
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s6 :: v_dual_mov_b32 v37, s4
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s5 :: v_dual_mov_b32 v39, s7
-; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB57_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
@@ -31954,10 +30958,10 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s56, 16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s58, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s58, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s58
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-FAKE16-NEXT: .LBB57_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s47, s57, s47
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s46, s56, s46
@@ -32019,10 +31023,8 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v16
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-FAKE16-NEXT: s_branch .LBB57_5
+; GFX11-FAKE16-NEXT: s_branch .LBB57_4
; GFX11-FAKE16-NEXT: .LBB57_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB57_2
-; GFX11-FAKE16-NEXT: .LBB57_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s57 :: v_dual_mov_b32 v16, s56
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v18, s28
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, s27 :: v_dual_mov_b32 v10, s26
@@ -32043,7 +31045,7 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s9 :: v_dual_mov_b32 v35, s8
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s6 :: v_dual_mov_b32 v37, s4
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v38, s5 :: v_dual_mov_b32 v39, s7
-; GFX11-FAKE16-NEXT: .LBB57_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB57_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v1
@@ -32803,9 +31805,9 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill
-; SI-NEXT: s_cbranch_scc0 .LBB59_3
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_4
+; SI-NEXT: s_cbranch_execnz .LBB59_3
; SI-NEXT: .LBB59_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s63
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -32978,10 +31980,8 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; SI-NEXT: v_lshr_b64 v[24:25], v[14:15], 16
; SI-NEXT: v_lshr_b64 v[22:23], v[16:17], 16
; SI-NEXT: v_lshr_b64 v[20:21], v[18:19], 16
-; SI-NEXT: s_branch .LBB59_5
+; SI-NEXT: s_branch .LBB59_4
; SI-NEXT: .LBB59_3:
-; SI-NEXT: s_branch .LBB59_2
-; SI-NEXT: .LBB59_4:
; SI-NEXT: v_mov_b32_e32 v50, s58
; SI-NEXT: v_mov_b32_e32 v40, s43
; SI-NEXT: v_mov_b32_e32 v51, s47
@@ -33026,7 +32026,7 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v24, s56
; SI-NEXT: v_mov_b32_e32 v22, s45
; SI-NEXT: v_mov_b32_e32 v20, s44
-; SI-NEXT: .LBB59_5: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v38
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v48
; SI-NEXT: v_or_b32_e32 v0, v2, v0
@@ -33133,9 +32133,9 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s63, s62, 16
; VI-NEXT: v_readfirstlane_b32 s4, v6
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
+; VI-NEXT: s_cbranch_execnz .LBB59_3
; VI-NEXT: .LBB59_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v20, 0x200
; VI-NEXT: v_add_f16_e32 v0, s16, v20
@@ -33178,10 +32178,8 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v21, s47, v20
; VI-NEXT: v_add_f16_e32 v19, s44, v20
; VI-NEXT: v_add_f16_e32 v20, s45, v20
-; VI-NEXT: s_branch .LBB59_5
+; VI-NEXT: s_branch .LBB59_4
; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
-; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v20, s45
; VI-NEXT: v_mov_b32_e32 v19, s44
; VI-NEXT: v_mov_b32_e32 v21, s47
@@ -33222,7 +32220,7 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v39, s43
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: .LBB59_5: ; %end
+; VI-NEXT: .LBB59_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v39, 16, v39
; VI-NEXT: v_lshlrev_b32_e32 v38, 16, v38
; VI-NEXT: v_lshlrev_b32_e32 v37, 16, v37
@@ -33296,9 +32294,9 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s44, s58, 16
; GFX9-NEXT: v_readfirstlane_b32 s4, v6
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
+; GFX9-NEXT: s_cbranch_execnz .LBB59_3
; GFX9-NEXT: .LBB59_2: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s63, s57
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
@@ -33361,10 +32359,8 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v22, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v21, 16, v18
; GFX9-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX9-NEXT: s_branch .LBB59_5
+; GFX9-NEXT: s_branch .LBB59_4
; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
-; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v19, s63
; GFX9-NEXT: v_mov_b32_e32 v18, s62
; GFX9-NEXT: v_mov_b32_e32 v17, s61
@@ -33405,7 +32401,7 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v37, s8
; GFX9-NEXT: v_mov_b32_e32 v38, s7
; GFX9-NEXT: v_mov_b32_e32 v39, s6
-; GFX9-NEXT: .LBB59_5: ; %end
+; GFX9-NEXT: .LBB59_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -33476,10 +32472,10 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s56, 16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s58, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s58, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s58
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s47, s57, s47
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s46, s56, s46
@@ -33541,10 +32537,8 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX11-TRUE16-NEXT: s_branch .LBB59_5
+; GFX11-TRUE16-NEXT: s_branch .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB59_2
-; GFX11-TRUE16-NEXT: .LBB59_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, s57 :: v_dual_mov_b32 v18, s56
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
@@ -33565,7 +32559,7 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s9 :: v_dual_mov_b32 v35, s8
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s6 :: v_dual_mov_b32 v37, s4
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s5 :: v_dual_mov_b32 v39, s7
-; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB59_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
@@ -33627,10 +32621,10 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s56, 16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s58, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s58, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s58
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-FAKE16-NEXT: .LBB59_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s47, s57, s47
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s46, s56, s46
@@ -33692,10 +32686,8 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v16
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-FAKE16-NEXT: s_branch .LBB59_5
+; GFX11-FAKE16-NEXT: s_branch .LBB59_4
; GFX11-FAKE16-NEXT: .LBB59_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB59_2
-; GFX11-FAKE16-NEXT: .LBB59_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s57 :: v_dual_mov_b32 v16, s56
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v18, s28
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, s27 :: v_dual_mov_b32 v10, s26
@@ -33716,7 +32708,7 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s9 :: v_dual_mov_b32 v35, s8
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s6 :: v_dual_mov_b32 v37, s4
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v38, s5 :: v_dual_mov_b32 v39, s7
-; GFX11-FAKE16-NEXT: .LBB59_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB59_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v1
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
index 45e835ddb0e28..f61959b450292 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
@@ -86,7 +86,7 @@ define inreg double @bitcast_i64_to_f64_scalar(i64 inreg %a, i32 inreg %b) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -96,14 +96,12 @@ define inreg double @bitcast_i64_to_f64_scalar(i64 inreg %a, i32 inreg %b) {
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_i64_to_f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -113,14 +111,12 @@ define inreg double @bitcast_i64_to_f64_scalar(i64 inreg %a, i32 inreg %b) {
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_i64_to_f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -130,15 +126,13 @@ define inreg double @bitcast_i64_to_f64_scalar(i64 inreg %a, i32 inreg %b) {
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_i64_to_f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -149,8 +143,6 @@ define inreg double @bitcast_i64_to_f64_scalar(i64 inreg %a, i32 inreg %b) {
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -244,15 +236,13 @@ define inreg i64 @bitcast_f64_to_i64_scalar(double inreg %a, i32 inreg %b) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -261,15 +251,13 @@ define inreg i64 @bitcast_f64_to_i64_scalar(double inreg %a, i32 inreg %b) {
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -278,15 +266,13 @@ define inreg i64 @bitcast_f64_to_i64_scalar(double inreg %a, i32 inreg %b) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -296,16 +282,14 @@ define inreg i64 @bitcast_f64_to_i64_scalar(double inreg %a, i32 inreg %b) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -405,7 +389,7 @@ define inreg <2 x i32> @bitcast_i64_to_v2i32_scalar(i64 inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB5_3
; SI-NEXT: .LBB5_2: ; %cmp.true
@@ -415,14 +399,12 @@ define inreg <2 x i32> @bitcast_i64_to_v2i32_scalar(i64 inreg %a, i32 inreg %b)
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_i64_to_v2i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -432,14 +414,12 @@ define inreg <2 x i32> @bitcast_i64_to_v2i32_scalar(i64 inreg %a, i32 inreg %b)
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_i64_to_v2i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -449,15 +429,13 @@ define inreg <2 x i32> @bitcast_i64_to_v2i32_scalar(i64 inreg %a, i32 inreg %b)
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_i64_to_v2i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -468,8 +446,6 @@ define inreg <2 x i32> @bitcast_i64_to_v2i32_scalar(i64 inreg %a, i32 inreg %b)
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -566,7 +542,7 @@ define inreg i64 @bitcast_v2i32_to_i64_scalar(<2 x i32> inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB7_3
; SI-NEXT: .LBB7_2: ; %cmp.true
@@ -576,14 +552,12 @@ define inreg i64 @bitcast_v2i32_to_i64_scalar(<2 x i32> inreg %a, i32 inreg %b)
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v2i32_to_i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -593,14 +567,12 @@ define inreg i64 @bitcast_v2i32_to_i64_scalar(<2 x i32> inreg %a, i32 inreg %b)
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v2i32_to_i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
@@ -610,15 +582,13 @@ define inreg i64 @bitcast_v2i32_to_i64_scalar(<2 x i32> inreg %a, i32 inreg %b)
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v2i32_to_i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
@@ -629,8 +599,6 @@ define inreg i64 @bitcast_v2i32_to_i64_scalar(<2 x i32> inreg %a, i32 inreg %b)
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -728,7 +696,7 @@ define inreg <2 x float> @bitcast_i64_to_v2f32_scalar(i64 inreg %a, i32 inreg %b
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB9_3
; SI-NEXT: .LBB9_2: ; %cmp.true
@@ -738,14 +706,12 @@ define inreg <2 x float> @bitcast_i64_to_v2f32_scalar(i64 inreg %a, i32 inreg %b
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_i64_to_v2f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -755,14 +721,12 @@ define inreg <2 x float> @bitcast_i64_to_v2f32_scalar(i64 inreg %a, i32 inreg %b
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_i64_to_v2f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -772,15 +736,13 @@ define inreg <2 x float> @bitcast_i64_to_v2f32_scalar(i64 inreg %a, i32 inreg %b
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_i64_to_v2f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -791,8 +753,6 @@ define inreg <2 x float> @bitcast_i64_to_v2f32_scalar(i64 inreg %a, i32 inreg %b
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -888,16 +848,14 @@ define inreg i64 @bitcast_v2f32_to_i64_scalar(<2 x float> inreg %a, i32 inreg %b
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -906,16 +864,14 @@ define inreg i64 @bitcast_v2f32_to_i64_scalar(<2 x float> inreg %a, i32 inreg %b
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -924,16 +880,14 @@ define inreg i64 @bitcast_v2f32_to_i64_scalar(<2 x float> inreg %a, i32 inreg %b
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -943,17 +897,15 @@ define inreg i64 @bitcast_v2f32_to_i64_scalar(<2 x float> inreg %a, i32 inreg %b
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -1067,7 +1019,7 @@ define inreg <4 x i16> @bitcast_i64_to_v4i16_scalar(i64 inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB13_4
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s8, s17, 16
; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16
@@ -1087,16 +1039,12 @@ define inreg <4 x i16> @bitcast_i64_to_v4i16_scalar(i64 inreg %a, i32 inreg %b)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB13_4:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB13_2
;
; VI-LABEL: bitcast_i64_to_v4i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
@@ -1106,14 +1054,12 @@ define inreg <4 x i16> @bitcast_i64_to_v4i16_scalar(i64 inreg %a, i32 inreg %b)
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_i64_to_v4i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
@@ -1123,15 +1069,13 @@ define inreg <4 x i16> @bitcast_i64_to_v4i16_scalar(i64 inreg %a, i32 inreg %b)
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_i64_to_v4i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
@@ -1142,8 +1086,6 @@ define inreg <4 x i16> @bitcast_i64_to_v4i16_scalar(i64 inreg %a, i32 inreg %b)
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1276,7 +1218,7 @@ define inreg i64 @bitcast_v4i16_to_i64_scalar(<4 x i16> inreg %a, i32 inreg %b)
; SI-NEXT: s_lshr_b32 s8, s17, 16
; SI-NEXT: s_lshr_b32 s9, s16, 16
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s9, 16
@@ -1300,15 +1242,12 @@ define inreg i64 @bitcast_v4i16_to_i64_scalar(<4 x i16> inreg %a, i32 inreg %b)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v4i16_to_i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB15_3
; VI-NEXT: .LBB15_2: ; %cmp.true
@@ -1326,23 +1265,19 @@ define inreg i64 @bitcast_v4i16_to_i64_scalar(<4 x i16> inreg %a, i32 inreg %b)
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v4i16_to_i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -1352,17 +1287,15 @@ define inreg i64 @bitcast_v4i16_to_i64_scalar(<4 x i16> inreg %a, i32 inreg %b)
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -1476,7 +1409,7 @@ define inreg <4 x half> @bitcast_i64_to_v4f16_scalar(i64 inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s8, s17, 16
; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16
@@ -1496,16 +1429,12 @@ define inreg <4 x half> @bitcast_i64_to_v4f16_scalar(i64 inreg %a, i32 inreg %b)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_i64_to_v4f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
@@ -1515,14 +1444,12 @@ define inreg <4 x half> @bitcast_i64_to_v4f16_scalar(i64 inreg %a, i32 inreg %b)
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_i64_to_v4f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
@@ -1532,15 +1459,13 @@ define inreg <4 x half> @bitcast_i64_to_v4f16_scalar(i64 inreg %a, i32 inreg %b)
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_i64_to_v4f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
@@ -1551,8 +1476,6 @@ define inreg <4 x half> @bitcast_i64_to_v4f16_scalar(i64 inreg %a, i32 inreg %b)
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1694,7 +1617,7 @@ define inreg i64 @bitcast_v4f16_to_i64_scalar(<4 x half> inreg %a, i32 inreg %b)
; SI-NEXT: s_lshr_b32 s8, s17, 16
; SI-NEXT: s_lshr_b32 s9, s16, 16
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s9, 16
@@ -1702,7 +1625,7 @@ define inreg i64 @bitcast_v4f16_to_i64_scalar(<4 x half> inreg %a, i32 inreg %b)
; SI-NEXT: s_and_b32 s5, s17, 0xffff
; SI-NEXT: s_lshl_b32 s6, s8, 16
; SI-NEXT: s_or_b32 s5, s5, s6
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s9
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -1722,9 +1645,6 @@ define inreg i64 @bitcast_v4f16_to_i64_scalar(<4 x half> inreg %a, i32 inreg %b)
; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -1733,9 +1653,9 @@ define inreg i64 @bitcast_v4f16_to_i64_scalar(<4 x half> inreg %a, i32 inreg %b)
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
+; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -1750,8 +1670,6 @@ define inreg i64 @bitcast_v4f16_to_i64_scalar(<4 x half> inreg %a, i32 inreg %b)
; VI-NEXT: v_or_b32_e32 v0, v0, v2
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
-; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -1760,17 +1678,15 @@ define inreg i64 @bitcast_v4f16_to_i64_scalar(<4 x half> inreg %a, i32 inreg %b)
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -1780,17 +1696,15 @@ define inreg i64 @bitcast_v4f16_to_i64_scalar(<4 x half> inreg %a, i32 inreg %b)
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -1914,7 +1828,7 @@ define inreg <4 x bfloat> @bitcast_i64_to_v4bf16_scalar(i64 inreg %a, i32 inreg
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s7, s17, 0xffff0000
; SI-NEXT: s_lshl_b32 s6, s17, 16
@@ -1938,18 +1852,12 @@ define inreg <4 x bfloat> @bitcast_i64_to_v4bf16_scalar(i64 inreg %a, i32 inreg
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s6
; SI-NEXT: v_lshr_b64 v[1:2], v[1:2], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_i64_to_v4bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
@@ -1959,14 +1867,12 @@ define inreg <4 x bfloat> @bitcast_i64_to_v4bf16_scalar(i64 inreg %a, i32 inreg
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_i64_to_v4bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
@@ -1976,15 +1882,13 @@ define inreg <4 x bfloat> @bitcast_i64_to_v4bf16_scalar(i64 inreg %a, i32 inreg
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_i64_to_v4bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
@@ -1995,8 +1899,6 @@ define inreg <4 x bfloat> @bitcast_i64_to_v4bf16_scalar(i64 inreg %a, i32 inreg
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_4:
-; GFX11-NEXT: s_branch .LBB21_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2290,7 +2192,7 @@ define inreg i64 @bitcast_v4bf16_to_i64_scalar(<4 x bfloat> inreg %a, i32 inreg
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s7
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v8
; SI-NEXT: v_lshr_b64 v[0:1], v[5:6], 16
@@ -2312,17 +2214,14 @@ define inreg i64 @bitcast_v4bf16_to_i64_scalar(<4 x bfloat> inreg %a, i32 inreg
; SI-NEXT: v_lshr_b64 v[1:2], v[1:2], 16
; SI-NEXT: .LBB23_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v4bf16_to_i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
+; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v4, 0x40c00000
@@ -2364,8 +2263,6 @@ define inreg i64 @bitcast_v4bf16_to_i64_scalar(<4 x bfloat> inreg %a, i32 inreg
; VI-NEXT: v_mov_b32_e32 v1, v2
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
-; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -2374,9 +2271,9 @@ define inreg i64 @bitcast_v4bf16_to_i64_scalar(<4 x bfloat> inreg %a, i32 inreg
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
+; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, 0x40c00000
@@ -2420,8 +2317,6 @@ define inreg i64 @bitcast_v4bf16_to_i64_scalar(<4 x bfloat> inreg %a, i32 inreg
; GFX9-NEXT: v_lshl_or_b32 v1, v2, 16, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
-; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -2431,10 +2326,10 @@ define inreg i64 @bitcast_v4bf16_to_i64_scalar(<4 x bfloat> inreg %a, i32 inreg
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB23_3
; GFX11-TRUE16-NEXT: .LBB23_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -2478,8 +2373,6 @@ define inreg i64 @bitcast_v4bf16_to_i64_scalar(<4 x bfloat> inreg %a, i32 inreg
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB23_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB23_2
-; GFX11-TRUE16-NEXT: .LBB23_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -2488,10 +2381,10 @@ define inreg i64 @bitcast_v4bf16_to_i64_scalar(<4 x bfloat> inreg %a, i32 inreg
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB23_3
; GFX11-FAKE16-NEXT: .LBB23_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -2539,8 +2432,6 @@ define inreg i64 @bitcast_v4bf16_to_i64_scalar(<4 x bfloat> inreg %a, i32 inreg
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v2
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB23_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB23_2
-; GFX11-FAKE16-NEXT: .LBB23_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -2777,7 +2668,7 @@ define inreg <8 x i8> @bitcast_i64_to_v8i8_scalar(i64 inreg %a, i32 inreg %b) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB25_4
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s12, s17, 24
; SI-NEXT: s_lshr_b32 s13, s17, 16
@@ -2805,20 +2696,12 @@ define inreg <8 x i8> @bitcast_i64_to_v8i8_scalar(i64 inreg %a, i32 inreg %b) {
; SI-NEXT: v_mov_b32_e32 v6, s13
; SI-NEXT: v_mov_b32_e32 v7, s12
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB25_4:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: s_branch .LBB25_2
;
; VI-LABEL: bitcast_i64_to_v8i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB25_4
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; VI-NEXT: s_lshr_b32 s5, s17, 24
@@ -2846,20 +2729,12 @@ define inreg <8 x i8> @bitcast_i64_to_v8i8_scalar(i64 inreg %a, i32 inreg %b) {
; VI-NEXT: v_mov_b32_e32 v6, s8
; VI-NEXT: v_mov_b32_e32 v7, s5
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_4:
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr9
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr5
-; VI-NEXT: s_branch .LBB25_2
;
; GFX9-LABEL: bitcast_i64_to_v8i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; GFX9-NEXT: s_lshr_b32 s5, s17, 24
@@ -2887,21 +2762,13 @@ define inreg <8 x i8> @bitcast_i64_to_v8i8_scalar(i64 inreg %a, i32 inreg %b) {
; GFX9-NEXT: v_mov_b32_e32 v6, s8
; GFX9-NEXT: v_mov_b32_e32 v7, s5
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_4:
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr9
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr5
-; GFX9-NEXT: s_branch .LBB25_2
;
; GFX11-LABEL: bitcast_i64_to_v8i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
; GFX11-NEXT: s_lshr_b32 s3, s1, 24
@@ -2927,14 +2794,6 @@ define inreg <8 x i8> @bitcast_i64_to_v8i8_scalar(i64 inreg %a, i32 inreg %b) {
; GFX11-NEXT: v_dual_mov_b32 v4, s1 :: v_dual_mov_b32 v5, s5
; GFX11-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB25_4:
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr2
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: s_branch .LBB25_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3290,7 +3149,7 @@ define inreg i64 @bitcast_v8i8_to_i64_scalar(<8 x i8> inreg %a, i32 inreg %b) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB27_4
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -3342,15 +3201,12 @@ define inreg i64 @bitcast_v8i8_to_i64_scalar(<8 x i8> inreg %a, i32 inreg %b) {
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB27_2
;
; VI-LABEL: bitcast_v8i8_to_i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v1, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v2, s19
@@ -3390,15 +3246,12 @@ define inreg i64 @bitcast_v8i8_to_i64_scalar(<8 x i8> inreg %a, i32 inreg %b) {
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x3000000, v1
; VI-NEXT: .LBB27_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v8i8_to_i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v1, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v2, s19
@@ -3437,16 +3290,13 @@ define inreg i64 @bitcast_v8i8_to_i64_scalar(<8 x i8> inreg %a, i32 inreg %b) {
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB27_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX9-NEXT: s_branch .LBB27_2
;
; GFX11-LABEL: bitcast_v8i8_to_i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
@@ -3489,9 +3339,6 @@ define inreg i64 @bitcast_v8i8_to_i64_scalar(<8 x i8> inreg %a, i32 inreg %b) {
; GFX11-NEXT: v_or_b32_e32 v1, v3, v4
; GFX11-NEXT: .LBB27_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB27_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX11-NEXT: s_branch .LBB27_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3585,15 +3432,13 @@ define inreg <2 x i32> @bitcast_f64_to_v2i32_scalar(double inreg %a, i32 inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB29_4
+; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB29_3:
-; SI-NEXT: s_branch .LBB29_2
-; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -3602,15 +3447,13 @@ define inreg <2 x i32> @bitcast_f64_to_v2i32_scalar(double inreg %a, i32 inreg %
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
+; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
-; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -3619,15 +3462,13 @@ define inreg <2 x i32> @bitcast_f64_to_v2i32_scalar(double inreg %a, i32 inreg %
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
+; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
-; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -3637,16 +3478,14 @@ define inreg <2 x i32> @bitcast_f64_to_v2i32_scalar(double inreg %a, i32 inreg %
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-NEXT: .LBB29_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
-; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -3745,7 +3584,7 @@ define inreg double @bitcast_v2i32_to_f64_scalar(<2 x i32> inreg %a, i32 inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB31_3
; SI-NEXT: .LBB31_2: ; %cmp.true
@@ -3755,14 +3594,12 @@ define inreg double @bitcast_v2i32_to_f64_scalar(<2 x i32> inreg %a, i32 inreg %
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v2i32_to_f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB31_3
; VI-NEXT: .LBB31_2: ; %cmp.true
@@ -3772,14 +3609,12 @@ define inreg double @bitcast_v2i32_to_f64_scalar(<2 x i32> inreg %a, i32 inreg %
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v2i32_to_f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
@@ -3789,15 +3624,13 @@ define inreg double @bitcast_v2i32_to_f64_scalar(<2 x i32> inreg %a, i32 inreg %
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB31_4:
-; GFX9-NEXT: s_branch .LBB31_2
;
; GFX11-LABEL: bitcast_v2i32_to_f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
@@ -3808,8 +3641,6 @@ define inreg double @bitcast_v2i32_to_f64_scalar(<2 x i32> inreg %a, i32 inreg %
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB31_4:
-; GFX11-NEXT: s_branch .LBB31_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3903,15 +3734,13 @@ define inreg <2 x float> @bitcast_f64_to_v2f32_scalar(double inreg %a, i32 inreg
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB33_4
+; SI-NEXT: s_cbranch_execnz .LBB33_3
; SI-NEXT: .LBB33_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB33_3:
-; SI-NEXT: s_branch .LBB33_2
-; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -3920,15 +3749,13 @@ define inreg <2 x float> @bitcast_f64_to_v2f32_scalar(double inreg %a, i32 inreg
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_4
+; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB33_3:
-; VI-NEXT: s_branch .LBB33_2
-; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -3937,15 +3764,13 @@ define inreg <2 x float> @bitcast_f64_to_v2f32_scalar(double inreg %a, i32 inreg
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
+; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: s_branch .LBB33_2
-; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -3955,16 +3780,14 @@ define inreg <2 x float> @bitcast_f64_to_v2f32_scalar(double inreg %a, i32 inreg
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
-; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -4062,16 +3885,14 @@ define inreg double @bitcast_v2f32_to_f64_scalar(<2 x float> inreg %a, i32 inreg
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB35_3
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB35_4
+; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB35_3:
-; SI-NEXT: s_branch .LBB35_2
-; SI-NEXT: .LBB35_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -4080,16 +3901,14 @@ define inreg double @bitcast_v2f32_to_f64_scalar(<2 x float> inreg %a, i32 inreg
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_4
+; VI-NEXT: s_cbranch_execnz .LBB35_3
; VI-NEXT: .LBB35_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB35_3:
-; VI-NEXT: s_branch .LBB35_2
-; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -4098,16 +3917,14 @@ define inreg double @bitcast_v2f32_to_f64_scalar(<2 x float> inreg %a, i32 inreg
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
+; GFX9-NEXT: s_cbranch_execnz .LBB35_3
; GFX9-NEXT: .LBB35_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
-; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -4117,17 +3934,15 @@ define inreg double @bitcast_v2f32_to_f64_scalar(<2 x float> inreg %a, i32 inreg
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-NEXT: .LBB35_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
-; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -4237,26 +4052,22 @@ define inreg <4 x i16> @bitcast_f64_to_v4i16_scalar(double inreg %a, i32 inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB37_3
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s8, s17, 16
; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB37_4
+; SI-NEXT: s_cbranch_execnz .LBB37_3
; SI-NEXT: .LBB37_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: v_lshr_b64 v[2:3], v[0:1], 16
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1
-; SI-NEXT: s_branch .LBB37_5
+; SI-NEXT: s_branch .LBB37_4
; SI-NEXT: .LBB37_3:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB37_2
-; SI-NEXT: .LBB37_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v3, s8
; SI-NEXT: v_mov_b32_e32 v2, s4
-; SI-NEXT: .LBB37_5: ; %end
+; SI-NEXT: .LBB37_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v0, v0, v2
@@ -4269,15 +4080,13 @@ define inreg <4 x i16> @bitcast_f64_to_v4i16_scalar(double inreg %a, i32 inreg %
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB37_3
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_4
+; VI-NEXT: s_cbranch_execnz .LBB37_3
; VI-NEXT: .LBB37_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB37_3:
-; VI-NEXT: s_branch .LBB37_2
-; VI-NEXT: .LBB37_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -4286,15 +4095,13 @@ define inreg <4 x i16> @bitcast_f64_to_v4i16_scalar(double inreg %a, i32 inreg %
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_4
+; GFX9-NEXT: s_cbranch_execnz .LBB37_3
; GFX9-NEXT: .LBB37_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB37_3:
-; GFX9-NEXT: s_branch .LBB37_2
-; GFX9-NEXT: .LBB37_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -4304,16 +4111,14 @@ define inreg <4 x i16> @bitcast_f64_to_v4i16_scalar(double inreg %a, i32 inreg %
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
; GFX11-NEXT: .LBB37_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: s_branch .LBB37_2
-; GFX11-NEXT: .LBB37_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -4448,7 +4253,7 @@ define inreg double @bitcast_v4i16_to_f64_scalar(<4 x i16> inreg %a, i32 inreg %
; SI-NEXT: s_lshr_b32 s8, s17, 16
; SI-NEXT: s_lshr_b32 s9, s16, 16
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB39_4
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s9, 16
@@ -4472,15 +4277,12 @@ define inreg double @bitcast_v4i16_to_f64_scalar(<4 x i16> inreg %a, i32 inreg %
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB39_2
;
; VI-LABEL: bitcast_v4i16_to_f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB39_3
; VI-NEXT: .LBB39_2: ; %cmp.true
@@ -4498,23 +4300,19 @@ define inreg double @bitcast_v4i16_to_f64_scalar(<4 x i16> inreg %a, i32 inreg %
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v4i16_to_f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
+; GFX9-NEXT: s_cbranch_execnz .LBB39_3
; GFX9-NEXT: .LBB39_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
-; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -4524,17 +4322,15 @@ define inreg double @bitcast_v4i16_to_f64_scalar(<4 x i16> inreg %a, i32 inreg %
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_3
; GFX11-NEXT: .LBB39_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
-; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -4644,26 +4440,22 @@ define inreg <4 x half> @bitcast_f64_to_v4f16_scalar(double inreg %a, i32 inreg
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB41_3
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s8, s17, 16
; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB41_4
+; SI-NEXT: s_cbranch_execnz .LBB41_3
; SI-NEXT: .LBB41_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: v_lshr_b64 v[2:3], v[0:1], 16
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1
-; SI-NEXT: s_branch .LBB41_5
+; SI-NEXT: s_branch .LBB41_4
; SI-NEXT: .LBB41_3:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB41_2
-; SI-NEXT: .LBB41_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v3, s8
; SI-NEXT: v_mov_b32_e32 v2, s4
-; SI-NEXT: .LBB41_5: ; %end
+; SI-NEXT: .LBB41_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v0, v0, v2
@@ -4676,15 +4468,13 @@ define inreg <4 x half> @bitcast_f64_to_v4f16_scalar(double inreg %a, i32 inreg
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB41_3
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB41_4
+; VI-NEXT: s_cbranch_execnz .LBB41_3
; VI-NEXT: .LBB41_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB41_3:
-; VI-NEXT: s_branch .LBB41_2
-; VI-NEXT: .LBB41_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -4693,15 +4483,13 @@ define inreg <4 x half> @bitcast_f64_to_v4f16_scalar(double inreg %a, i32 inreg
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB41_4
+; GFX9-NEXT: s_cbranch_execnz .LBB41_3
; GFX9-NEXT: .LBB41_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB41_3:
-; GFX9-NEXT: s_branch .LBB41_2
-; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -4711,16 +4499,14 @@ define inreg <4 x half> @bitcast_f64_to_v4f16_scalar(double inreg %a, i32 inreg
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
; GFX11-NEXT: .LBB41_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB41_3:
-; GFX11-NEXT: s_branch .LBB41_2
-; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -4864,7 +4650,7 @@ define inreg double @bitcast_v4f16_to_f64_scalar(<4 x half> inreg %a, i32 inreg
; SI-NEXT: s_lshr_b32 s8, s17, 16
; SI-NEXT: s_lshr_b32 s9, s16, 16
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB43_3
+; SI-NEXT: s_cbranch_scc0 .LBB43_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s9, 16
@@ -4872,7 +4658,7 @@ define inreg double @bitcast_v4f16_to_f64_scalar(<4 x half> inreg %a, i32 inreg
; SI-NEXT: s_and_b32 s5, s17, 0xffff
; SI-NEXT: s_lshl_b32 s6, s8, 16
; SI-NEXT: s_or_b32 s5, s5, s6
-; SI-NEXT: s_cbranch_execnz .LBB43_4
+; SI-NEXT: s_cbranch_execnz .LBB43_3
; SI-NEXT: .LBB43_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s9
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -4892,9 +4678,6 @@ define inreg double @bitcast_v4f16_to_f64_scalar(<4 x half> inreg %a, i32 inreg
; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB43_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB43_2
-; SI-NEXT: .LBB43_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -4903,9 +4686,9 @@ define inreg double @bitcast_v4f16_to_f64_scalar(<4 x half> inreg %a, i32 inreg
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB43_3
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB43_4
+; VI-NEXT: s_cbranch_execnz .LBB43_3
; VI-NEXT: .LBB43_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -4920,8 +4703,6 @@ define inreg double @bitcast_v4f16_to_f64_scalar(<4 x half> inreg %a, i32 inreg
; VI-NEXT: v_or_b32_e32 v0, v0, v2
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB43_3:
-; VI-NEXT: s_branch .LBB43_2
-; VI-NEXT: .LBB43_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -4930,17 +4711,15 @@ define inreg double @bitcast_v4f16_to_f64_scalar(<4 x half> inreg %a, i32 inreg
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
+; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
-; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -4950,17 +4729,15 @@ define inreg double @bitcast_v4f16_to_f64_scalar(<4 x half> inreg %a, i32 inreg
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-NEXT: .LBB43_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
-; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -5079,32 +4856,26 @@ define inreg <4 x bfloat> @bitcast_f64_to_v4bf16_scalar(double inreg %a, i32 inr
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB45_3
+; SI-NEXT: s_cbranch_scc0 .LBB45_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s9, s17, 0xffff0000
; SI-NEXT: s_lshl_b32 s8, s17, 16
; SI-NEXT: s_and_b32 s7, s16, 0xffff0000
; SI-NEXT: s_lshl_b32 s6, s16, 16
-; SI-NEXT: s_cbranch_execnz .LBB45_4
+; SI-NEXT: s_cbranch_execnz .LBB45_3
; SI-NEXT: .LBB45_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v0
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: s_branch .LBB45_5
+; SI-NEXT: s_branch .LBB45_4
; SI-NEXT: .LBB45_3:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB45_2
-; SI-NEXT: .LBB45_4:
; SI-NEXT: v_mov_b32_e32 v3, s9
; SI-NEXT: v_mov_b32_e32 v2, s8
; SI-NEXT: v_mov_b32_e32 v1, s7
; SI-NEXT: v_mov_b32_e32 v0, s6
-; SI-NEXT: .LBB45_5: ; %end
+; SI-NEXT: .LBB45_4: ; %end
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
@@ -5119,15 +4890,13 @@ define inreg <4 x bfloat> @bitcast_f64_to_v4bf16_scalar(double inreg %a, i32 inr
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB45_3
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB45_4
+; VI-NEXT: s_cbranch_execnz .LBB45_3
; VI-NEXT: .LBB45_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB45_3:
-; VI-NEXT: s_branch .LBB45_2
-; VI-NEXT: .LBB45_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -5136,15 +4905,13 @@ define inreg <4 x bfloat> @bitcast_f64_to_v4bf16_scalar(double inreg %a, i32 inr
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB45_4
+; GFX9-NEXT: s_cbranch_execnz .LBB45_3
; GFX9-NEXT: .LBB45_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB45_3:
-; GFX9-NEXT: s_branch .LBB45_2
-; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -5154,16 +4921,14 @@ define inreg <4 x bfloat> @bitcast_f64_to_v4bf16_scalar(double inreg %a, i32 inr
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
; GFX11-NEXT: .LBB45_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB45_3:
-; GFX11-NEXT: s_branch .LBB45_2
-; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -5459,7 +5224,7 @@ define inreg double @bitcast_v4bf16_to_f64_scalar(<4 x bfloat> inreg %a, i32 inr
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s7
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB47_4
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v8
; SI-NEXT: v_lshr_b64 v[0:1], v[5:6], 16
@@ -5481,17 +5246,14 @@ define inreg double @bitcast_v4bf16_to_f64_scalar(<4 x bfloat> inreg %a, i32 inr
; SI-NEXT: v_lshr_b64 v[1:2], v[1:2], 16
; SI-NEXT: .LBB47_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB47_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SI-NEXT: s_branch .LBB47_2
;
; VI-LABEL: bitcast_v4bf16_to_f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
+; VI-NEXT: s_cbranch_execnz .LBB47_3
; VI-NEXT: .LBB47_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v4, 0x40c00000
@@ -5533,8 +5295,6 @@ define inreg double @bitcast_v4bf16_to_f64_scalar(<4 x bfloat> inreg %a, i32 inr
; VI-NEXT: v_mov_b32_e32 v1, v2
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
-; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -5543,9 +5303,9 @@ define inreg double @bitcast_v4bf16_to_f64_scalar(<4 x bfloat> inreg %a, i32 inr
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
+; GFX9-NEXT: s_cbranch_execnz .LBB47_3
; GFX9-NEXT: .LBB47_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, 0x40c00000
@@ -5589,8 +5349,6 @@ define inreg double @bitcast_v4bf16_to_f64_scalar(<4 x bfloat> inreg %a, i32 inr
; GFX9-NEXT: v_lshl_or_b32 v1, v2, 16, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
-; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -5600,10 +5358,10 @@ define inreg double @bitcast_v4bf16_to_f64_scalar(<4 x bfloat> inreg %a, i32 inr
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -5647,8 +5405,6 @@ define inreg double @bitcast_v4bf16_to_f64_scalar(<4 x bfloat> inreg %a, i32 inr
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB47_2
-; GFX11-TRUE16-NEXT: .LBB47_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -5657,10 +5413,10 @@ define inreg double @bitcast_v4bf16_to_f64_scalar(<4 x bfloat> inreg %a, i32 inr
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-FAKE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -5708,8 +5464,6 @@ define inreg double @bitcast_v4bf16_to_f64_scalar(<4 x bfloat> inreg %a, i32 inr
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v2
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB47_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB47_2
-; GFX11-FAKE16-NEXT: .LBB47_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -5939,7 +5693,7 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB49_3
+; SI-NEXT: s_cbranch_scc0 .LBB49_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s14, s17, 24
; SI-NEXT: s_lshr_b32 s13, s17, 16
@@ -5947,7 +5701,7 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16
; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8
-; SI-NEXT: s_cbranch_execnz .LBB49_4
+; SI-NEXT: s_cbranch_execnz .LBB49_3
; SI-NEXT: .LBB49_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[10:11], s[16:17], 1.0
; SI-NEXT: v_lshr_b64 v[3:4], v[10:11], 24
@@ -5956,16 +5710,8 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v11
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v11
; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v11
-; SI-NEXT: s_branch .LBB49_5
+; SI-NEXT: s_branch .LBB49_4
; SI-NEXT: .LBB49_3:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: s_branch .LBB49_2
-; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v11, s17
; SI-NEXT: v_mov_b32_e32 v10, s16
; SI-NEXT: v_mov_b32_e32 v7, s14
@@ -5974,7 +5720,7 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; SI-NEXT: v_mov_b32_e32 v1, s8
; SI-NEXT: v_mov_b32_e32 v8, s6
; SI-NEXT: v_mov_b32_e32 v3, s4
-; SI-NEXT: .LBB49_5: ; %end
+; SI-NEXT: .LBB49_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, v10
; SI-NEXT: v_mov_b32_e32 v2, v8
; SI-NEXT: v_mov_b32_e32 v4, v11
@@ -5984,7 +5730,7 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB49_3
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; VI-NEXT: s_lshr_b32 s9, s17, 24
@@ -5992,7 +5738,7 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; VI-NEXT: s_lshr_b32 s5, s17, 8
; VI-NEXT: s_lshr_b32 s11, s16, 16
; VI-NEXT: s_lshr_b32 s10, s16, 8
-; VI-NEXT: s_cbranch_execnz .LBB49_4
+; VI-NEXT: s_cbranch_execnz .LBB49_3
; VI-NEXT: .LBB49_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[8:9], s[16:17], 1.0
; VI-NEXT: v_lshrrev_b64 v[3:4], 24, v[8:9]
@@ -6001,16 +5747,8 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; VI-NEXT: v_lshrrev_b32_e32 v5, 8, v9
; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v8
; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v8
-; VI-NEXT: s_branch .LBB49_5
+; VI-NEXT: s_branch .LBB49_4
; VI-NEXT: .LBB49_3:
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr5
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr9
-; VI-NEXT: s_branch .LBB49_2
-; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v8, s16
; VI-NEXT: v_mov_b32_e32 v9, s17
; VI-NEXT: v_mov_b32_e32 v2, s11
@@ -6019,7 +5757,7 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; VI-NEXT: v_mov_b32_e32 v7, s9
; VI-NEXT: v_mov_b32_e32 v6, s8
; VI-NEXT: v_mov_b32_e32 v5, s5
-; VI-NEXT: .LBB49_5: ; %end
+; VI-NEXT: .LBB49_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, v8
; VI-NEXT: v_mov_b32_e32 v4, v9
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -6028,7 +5766,7 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; GFX9-NEXT: s_lshr_b32 s9, s17, 24
@@ -6036,7 +5774,7 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; GFX9-NEXT: s_lshr_b32 s5, s17, 8
; GFX9-NEXT: s_lshr_b32 s11, s16, 16
; GFX9-NEXT: s_lshr_b32 s10, s16, 8
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
+; GFX9-NEXT: s_cbranch_execnz .LBB49_3
; GFX9-NEXT: .LBB49_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[8:9], s[16:17], 1.0
; GFX9-NEXT: v_lshrrev_b64 v[3:4], 24, v[8:9]
@@ -6045,16 +5783,8 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v8
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v8
-; GFX9-NEXT: s_branch .LBB49_5
+; GFX9-NEXT: s_branch .LBB49_4
; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr5
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr9
-; GFX9-NEXT: s_branch .LBB49_2
-; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v8, s16
; GFX9-NEXT: v_mov_b32_e32 v9, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s11
@@ -6063,7 +5793,7 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; GFX9-NEXT: v_mov_b32_e32 v7, s9
; GFX9-NEXT: v_mov_b32_e32 v6, s8
; GFX9-NEXT: v_mov_b32_e32 v5, s5
-; GFX9-NEXT: .LBB49_5: ; %end
+; GFX9-NEXT: .LBB49_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, v8
; GFX9-NEXT: v_mov_b32_e32 v4, v9
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -6073,7 +5803,7 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
; GFX11-NEXT: s_lshr_b32 s6, s1, 24
@@ -6082,7 +5812,7 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; GFX11-NEXT: s_lshr_b32 s8, s0, 16
; GFX11-NEXT: s_lshr_b32 s7, s0, 8
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[8:9], s[0:1], 1.0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -6092,22 +5822,14 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v9
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v8
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v8
-; GFX11-NEXT: s_branch .LBB49_5
+; GFX11-NEXT: s_branch .LBB49_4
; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr2
-; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB49_2
-; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s8 :: v_dual_mov_b32 v1, s7
; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v6, s5
; GFX11-NEXT: v_mov_b32_e32 v7, s6
; GFX11-NEXT: v_mov_b32_e32 v5, s3
-; GFX11-NEXT: .LBB49_5: ; %end
+; GFX11-NEXT: .LBB49_4: ; %end
; GFX11-NEXT: v_mov_b32_e32 v0, v8
; GFX11-NEXT: v_mov_b32_e32 v4, v9
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -6466,7 +6188,7 @@ define inreg double @bitcast_v8i8_to_f64_scalar(<8 x i8> inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB51_4
+; SI-NEXT: s_cbranch_scc0 .LBB51_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -6518,15 +6240,12 @@ define inreg double @bitcast_v8i8_to_f64_scalar(<8 x i8> inreg %a, i32 inreg %b)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB51_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB51_2
;
; VI-LABEL: bitcast_v8i8_to_f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB51_4
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v1, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v2, s19
@@ -6566,15 +6285,12 @@ define inreg double @bitcast_v8i8_to_f64_scalar(<8 x i8> inreg %a, i32 inreg %b)
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x3000000, v1
; VI-NEXT: .LBB51_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; VI-NEXT: s_branch .LBB51_2
;
; GFX9-LABEL: bitcast_v8i8_to_f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v1, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v2, s19
@@ -6613,16 +6329,13 @@ define inreg double @bitcast_v8i8_to_f64_scalar(<8 x i8> inreg %a, i32 inreg %b)
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB51_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB51_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX9-NEXT: s_branch .LBB51_2
;
; GFX11-LABEL: bitcast_v8i8_to_f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
@@ -6665,9 +6378,6 @@ define inreg double @bitcast_v8i8_to_f64_scalar(<8 x i8> inreg %a, i32 inreg %b)
; GFX11-NEXT: v_or_b32_e32 v1, v3, v4
; GFX11-NEXT: .LBB51_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB51_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX11-NEXT: s_branch .LBB51_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6764,7 +6474,7 @@ define inreg <2 x float> @bitcast_v2i32_to_v2f32_scalar(<2 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB53_4
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB53_3
; SI-NEXT: .LBB53_2: ; %cmp.true
@@ -6774,14 +6484,12 @@ define inreg <2 x float> @bitcast_v2i32_to_v2f32_scalar(<2 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB53_4:
-; SI-NEXT: s_branch .LBB53_2
;
; VI-LABEL: bitcast_v2i32_to_v2f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB53_4
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB53_3
; VI-NEXT: .LBB53_2: ; %cmp.true
@@ -6791,14 +6499,12 @@ define inreg <2 x float> @bitcast_v2i32_to_v2f32_scalar(<2 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_4:
-; VI-NEXT: s_branch .LBB53_2
;
; GFX9-LABEL: bitcast_v2i32_to_v2f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB53_3
; GFX9-NEXT: .LBB53_2: ; %cmp.true
@@ -6808,15 +6514,13 @@ define inreg <2 x float> @bitcast_v2i32_to_v2f32_scalar(<2 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB53_4:
-; GFX9-NEXT: s_branch .LBB53_2
;
; GFX11-LABEL: bitcast_v2i32_to_v2f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
@@ -6827,8 +6531,6 @@ define inreg <2 x float> @bitcast_v2i32_to_v2f32_scalar(<2 x i32> inreg %a, i32
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6924,16 +6626,14 @@ define inreg <2 x i32> @bitcast_v2f32_to_v2i32_scalar(<2 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB55_4
+; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB55_3:
-; SI-NEXT: s_branch .LBB55_2
-; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -6942,16 +6642,14 @@ define inreg <2 x i32> @bitcast_v2f32_to_v2i32_scalar(<2 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_4
+; VI-NEXT: s_cbranch_execnz .LBB55_3
; VI-NEXT: .LBB55_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB55_3:
-; VI-NEXT: s_branch .LBB55_2
-; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -6960,16 +6658,14 @@ define inreg <2 x i32> @bitcast_v2f32_to_v2i32_scalar(<2 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
+; GFX9-NEXT: s_cbranch_execnz .LBB55_3
; GFX9-NEXT: .LBB55_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
-; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -6979,17 +6675,15 @@ define inreg <2 x i32> @bitcast_v2f32_to_v2i32_scalar(<2 x float> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-NEXT: .LBB55_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
-; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -7102,7 +6796,7 @@ define inreg <4 x i16> @bitcast_v2i32_to_v4i16_scalar(<2 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB57_4
+; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s8, s17, 16
; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16
@@ -7122,16 +6816,12 @@ define inreg <4 x i16> @bitcast_v2i32_to_v4i16_scalar(<2 x i32> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB57_4:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB57_2
;
; VI-LABEL: bitcast_v2i32_to_v4i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB57_3
; VI-NEXT: .LBB57_2: ; %cmp.true
@@ -7141,14 +6831,12 @@ define inreg <4 x i16> @bitcast_v2i32_to_v4i16_scalar(<2 x i32> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v2i32_to_v4i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB57_3
; GFX9-NEXT: .LBB57_2: ; %cmp.true
@@ -7158,15 +6846,13 @@ define inreg <4 x i16> @bitcast_v2i32_to_v4i16_scalar(<2 x i32> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB57_4:
-; GFX9-NEXT: s_branch .LBB57_2
;
; GFX11-LABEL: bitcast_v2i32_to_v4i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB57_3
@@ -7177,8 +6863,6 @@ define inreg <4 x i16> @bitcast_v2i32_to_v4i16_scalar(<2 x i32> inreg %a, i32 in
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB57_4:
-; GFX11-NEXT: s_branch .LBB57_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7311,7 +6995,7 @@ define inreg <2 x i32> @bitcast_v4i16_to_v2i32_scalar(<4 x i16> inreg %a, i32 in
; SI-NEXT: s_lshr_b32 s8, s17, 16
; SI-NEXT: s_lshr_b32 s9, s16, 16
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB59_4
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s9, 16
@@ -7335,15 +7019,12 @@ define inreg <2 x i32> @bitcast_v4i16_to_v2i32_scalar(<4 x i16> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB59_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB59_2
;
; VI-LABEL: bitcast_v4i16_to_v2i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_4
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB59_3
; VI-NEXT: .LBB59_2: ; %cmp.true
@@ -7361,23 +7042,19 @@ define inreg <2 x i32> @bitcast_v4i16_to_v2i32_scalar(<4 x i16> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB59_4:
-; VI-NEXT: s_branch .LBB59_2
;
; GFX9-LABEL: bitcast_v4i16_to_v2i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
+; GFX9-NEXT: s_cbranch_execnz .LBB59_3
; GFX9-NEXT: .LBB59_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
-; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -7387,17 +7064,15 @@ define inreg <2 x i32> @bitcast_v4i16_to_v2i32_scalar(<4 x i16> inreg %a, i32 in
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-NEXT: .LBB59_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
-; GFX11-NEXT: .LBB59_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -7510,7 +7185,7 @@ define inreg <4 x half> @bitcast_v2i32_to_v4f16_scalar(<2 x i32> inreg %a, i32 i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB61_4
+; SI-NEXT: s_cbranch_scc0 .LBB61_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s8, s17, 16
; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16
@@ -7530,16 +7205,12 @@ define inreg <4 x half> @bitcast_v2i32_to_v4f16_scalar(<2 x i32> inreg %a, i32 i
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB61_4:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB61_2
;
; VI-LABEL: bitcast_v2i32_to_v4f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB61_4
+; VI-NEXT: s_cbranch_scc0 .LBB61_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB61_3
; VI-NEXT: .LBB61_2: ; %cmp.true
@@ -7549,14 +7220,12 @@ define inreg <4 x half> @bitcast_v2i32_to_v4f16_scalar(<2 x i32> inreg %a, i32 i
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB61_4:
-; VI-NEXT: s_branch .LBB61_2
;
; GFX9-LABEL: bitcast_v2i32_to_v4f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB61_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB61_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB61_3
; GFX9-NEXT: .LBB61_2: ; %cmp.true
@@ -7566,15 +7235,13 @@ define inreg <4 x half> @bitcast_v2i32_to_v4f16_scalar(<2 x i32> inreg %a, i32 i
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB61_4:
-; GFX9-NEXT: s_branch .LBB61_2
;
; GFX11-LABEL: bitcast_v2i32_to_v4f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB61_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB61_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB61_3
@@ -7585,8 +7252,6 @@ define inreg <4 x half> @bitcast_v2i32_to_v4f16_scalar(<2 x i32> inreg %a, i32 i
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB61_4:
-; GFX11-NEXT: s_branch .LBB61_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7728,7 +7393,7 @@ define inreg <2 x i32> @bitcast_v4f16_to_v2i32_scalar(<4 x half> inreg %a, i32 i
; SI-NEXT: s_lshr_b32 s8, s17, 16
; SI-NEXT: s_lshr_b32 s9, s16, 16
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB63_3
+; SI-NEXT: s_cbranch_scc0 .LBB63_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s9, 16
@@ -7736,7 +7401,7 @@ define inreg <2 x i32> @bitcast_v4f16_to_v2i32_scalar(<4 x half> inreg %a, i32 i
; SI-NEXT: s_and_b32 s5, s17, 0xffff
; SI-NEXT: s_lshl_b32 s6, s8, 16
; SI-NEXT: s_or_b32 s5, s5, s6
-; SI-NEXT: s_cbranch_execnz .LBB63_4
+; SI-NEXT: s_cbranch_execnz .LBB63_3
; SI-NEXT: .LBB63_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s9
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -7756,9 +7421,6 @@ define inreg <2 x i32> @bitcast_v4f16_to_v2i32_scalar(<4 x half> inreg %a, i32 i
; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB63_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB63_2
-; SI-NEXT: .LBB63_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -7767,9 +7429,9 @@ define inreg <2 x i32> @bitcast_v4f16_to_v2i32_scalar(<4 x half> inreg %a, i32 i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB63_3
+; VI-NEXT: s_cbranch_scc0 .LBB63_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB63_4
+; VI-NEXT: s_cbranch_execnz .LBB63_3
; VI-NEXT: .LBB63_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -7784,8 +7446,6 @@ define inreg <2 x i32> @bitcast_v4f16_to_v2i32_scalar(<4 x half> inreg %a, i32 i
; VI-NEXT: v_or_b32_e32 v0, v0, v2
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB63_3:
-; VI-NEXT: s_branch .LBB63_2
-; VI-NEXT: .LBB63_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -7794,17 +7454,15 @@ define inreg <2 x i32> @bitcast_v4f16_to_v2i32_scalar(<4 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB63_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB63_4
+; GFX9-NEXT: s_cbranch_execnz .LBB63_3
; GFX9-NEXT: .LBB63_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB63_3:
-; GFX9-NEXT: s_branch .LBB63_2
-; GFX9-NEXT: .LBB63_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -7814,17 +7472,15 @@ define inreg <2 x i32> @bitcast_v4f16_to_v2i32_scalar(<4 x half> inreg %a, i32 i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB63_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB63_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB63_3
; GFX11-NEXT: .LBB63_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB63_3:
-; GFX11-NEXT: s_branch .LBB63_2
-; GFX11-NEXT: .LBB63_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -7947,7 +7603,7 @@ define inreg <4 x bfloat> @bitcast_v2i32_to_v4bf16_scalar(<2 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB65_4
+; SI-NEXT: s_cbranch_scc0 .LBB65_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s7, s17, 0xffff0000
; SI-NEXT: s_lshl_b32 s6, s17, 16
@@ -7971,18 +7627,12 @@ define inreg <4 x bfloat> @bitcast_v2i32_to_v4bf16_scalar(<2 x i32> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s6
; SI-NEXT: v_lshr_b64 v[1:2], v[1:2], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB65_4:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB65_2
;
; VI-LABEL: bitcast_v2i32_to_v4bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB65_4
+; VI-NEXT: s_cbranch_scc0 .LBB65_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB65_3
; VI-NEXT: .LBB65_2: ; %cmp.true
@@ -7992,14 +7642,12 @@ define inreg <4 x bfloat> @bitcast_v2i32_to_v4bf16_scalar(<2 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB65_4:
-; VI-NEXT: s_branch .LBB65_2
;
; GFX9-LABEL: bitcast_v2i32_to_v4bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB65_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB65_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB65_3
; GFX9-NEXT: .LBB65_2: ; %cmp.true
@@ -8009,15 +7657,13 @@ define inreg <4 x bfloat> @bitcast_v2i32_to_v4bf16_scalar(<2 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB65_4:
-; GFX9-NEXT: s_branch .LBB65_2
;
; GFX11-LABEL: bitcast_v2i32_to_v4bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB65_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB65_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB65_3
@@ -8028,8 +7674,6 @@ define inreg <4 x bfloat> @bitcast_v2i32_to_v4bf16_scalar(<2 x i32> inreg %a, i3
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB65_4:
-; GFX11-NEXT: s_branch .LBB65_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8323,7 +7967,7 @@ define inreg <2 x i32> @bitcast_v4bf16_to_v2i32_scalar(<4 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s7
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB67_4
+; SI-NEXT: s_cbranch_scc0 .LBB67_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v8
; SI-NEXT: v_lshr_b64 v[0:1], v[5:6], 16
@@ -8345,17 +7989,14 @@ define inreg <2 x i32> @bitcast_v4bf16_to_v2i32_scalar(<4 x bfloat> inreg %a, i3
; SI-NEXT: v_lshr_b64 v[1:2], v[1:2], 16
; SI-NEXT: .LBB67_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB67_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SI-NEXT: s_branch .LBB67_2
;
; VI-LABEL: bitcast_v4bf16_to_v2i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB67_3
+; VI-NEXT: s_cbranch_scc0 .LBB67_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB67_4
+; VI-NEXT: s_cbranch_execnz .LBB67_3
; VI-NEXT: .LBB67_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v4, 0x40c00000
@@ -8397,8 +8038,6 @@ define inreg <2 x i32> @bitcast_v4bf16_to_v2i32_scalar(<4 x bfloat> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v1, v2
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB67_3:
-; VI-NEXT: s_branch .LBB67_2
-; VI-NEXT: .LBB67_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -8407,9 +8046,9 @@ define inreg <2 x i32> @bitcast_v4bf16_to_v2i32_scalar(<4 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB67_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB67_4
+; GFX9-NEXT: s_cbranch_execnz .LBB67_3
; GFX9-NEXT: .LBB67_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, 0x40c00000
@@ -8453,8 +8092,6 @@ define inreg <2 x i32> @bitcast_v4bf16_to_v2i32_scalar(<4 x bfloat> inreg %a, i3
; GFX9-NEXT: v_lshl_or_b32 v1, v2, 16, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB67_3:
-; GFX9-NEXT: s_branch .LBB67_2
-; GFX9-NEXT: .LBB67_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -8464,10 +8101,10 @@ define inreg <2 x i32> @bitcast_v4bf16_to_v2i32_scalar(<4 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB67_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB67_3
; GFX11-TRUE16-NEXT: .LBB67_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -8511,8 +8148,6 @@ define inreg <2 x i32> @bitcast_v4bf16_to_v2i32_scalar(<4 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB67_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB67_2
-; GFX11-TRUE16-NEXT: .LBB67_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -8521,10 +8156,10 @@ define inreg <2 x i32> @bitcast_v4bf16_to_v2i32_scalar(<4 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB67_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB67_3
; GFX11-FAKE16-NEXT: .LBB67_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -8572,8 +8207,6 @@ define inreg <2 x i32> @bitcast_v4bf16_to_v2i32_scalar(<4 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v2
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB67_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB67_2
-; GFX11-FAKE16-NEXT: .LBB67_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -8808,7 +8441,7 @@ define inreg <8 x i8> @bitcast_v2i32_to_v8i8_scalar(<2 x i32> inreg %a, i32 inre
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB69_4
+; SI-NEXT: s_cbranch_scc0 .LBB69_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s12, s17, 24
; SI-NEXT: s_lshr_b32 s13, s17, 16
@@ -8836,20 +8469,12 @@ define inreg <8 x i8> @bitcast_v2i32_to_v8i8_scalar(<2 x i32> inreg %a, i32 inre
; SI-NEXT: v_mov_b32_e32 v6, s13
; SI-NEXT: v_mov_b32_e32 v7, s12
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB69_4:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: s_branch .LBB69_2
;
; VI-LABEL: bitcast_v2i32_to_v8i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB69_4
+; VI-NEXT: s_cbranch_scc0 .LBB69_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; VI-NEXT: s_lshr_b32 s5, s17, 24
@@ -8877,20 +8502,12 @@ define inreg <8 x i8> @bitcast_v2i32_to_v8i8_scalar(<2 x i32> inreg %a, i32 inre
; VI-NEXT: v_mov_b32_e32 v6, s8
; VI-NEXT: v_mov_b32_e32 v7, s5
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB69_4:
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr9
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr5
-; VI-NEXT: s_branch .LBB69_2
;
; GFX9-LABEL: bitcast_v2i32_to_v8i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB69_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB69_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; GFX9-NEXT: s_lshr_b32 s5, s17, 24
@@ -8918,21 +8535,13 @@ define inreg <8 x i8> @bitcast_v2i32_to_v8i8_scalar(<2 x i32> inreg %a, i32 inre
; GFX9-NEXT: v_mov_b32_e32 v6, s8
; GFX9-NEXT: v_mov_b32_e32 v7, s5
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB69_4:
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr9
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr5
-; GFX9-NEXT: s_branch .LBB69_2
;
; GFX11-LABEL: bitcast_v2i32_to_v8i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB69_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB69_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
; GFX11-NEXT: s_lshr_b32 s3, s1, 24
@@ -8958,14 +8567,6 @@ define inreg <8 x i8> @bitcast_v2i32_to_v8i8_scalar(<2 x i32> inreg %a, i32 inre
; GFX11-NEXT: v_dual_mov_b32 v4, s1 :: v_dual_mov_b32 v5, s5
; GFX11-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB69_4:
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr2
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: s_branch .LBB69_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9321,7 +8922,7 @@ define inreg <2 x i32> @bitcast_v8i8_to_v2i32_scalar(<8 x i8> inreg %a, i32 inre
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB71_4
+; SI-NEXT: s_cbranch_scc0 .LBB71_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -9373,15 +8974,12 @@ define inreg <2 x i32> @bitcast_v8i8_to_v2i32_scalar(<8 x i8> inreg %a, i32 inre
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB71_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB71_2
;
; VI-LABEL: bitcast_v8i8_to_v2i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB71_4
+; VI-NEXT: s_cbranch_scc0 .LBB71_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v1, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v2, s19
@@ -9421,15 +9019,12 @@ define inreg <2 x i32> @bitcast_v8i8_to_v2i32_scalar(<8 x i8> inreg %a, i32 inre
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x3000000, v1
; VI-NEXT: .LBB71_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB71_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; VI-NEXT: s_branch .LBB71_2
;
; GFX9-LABEL: bitcast_v8i8_to_v2i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB71_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB71_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v1, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v2, s19
@@ -9468,16 +9063,13 @@ define inreg <2 x i32> @bitcast_v8i8_to_v2i32_scalar(<8 x i8> inreg %a, i32 inre
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB71_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB71_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX9-NEXT: s_branch .LBB71_2
;
; GFX11-LABEL: bitcast_v8i8_to_v2i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB71_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB71_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
@@ -9520,9 +9112,6 @@ define inreg <2 x i32> @bitcast_v8i8_to_v2i32_scalar(<8 x i8> inreg %a, i32 inre
; GFX11-NEXT: v_or_b32_e32 v1, v3, v4
; GFX11-NEXT: .LBB71_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB71_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX11-NEXT: s_branch .LBB71_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9632,27 +9221,23 @@ define inreg <4 x i16> @bitcast_v2f32_to_v4i16_scalar(<2 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB73_3
+; SI-NEXT: s_cbranch_scc0 .LBB73_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s8, s17, 16
; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB73_4
+; SI-NEXT: s_cbranch_execnz .LBB73_3
; SI-NEXT: .LBB73_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: v_lshr_b64 v[2:3], v[0:1], 16
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1
-; SI-NEXT: s_branch .LBB73_5
+; SI-NEXT: s_branch .LBB73_4
; SI-NEXT: .LBB73_3:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB73_2
-; SI-NEXT: .LBB73_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v3, s8
; SI-NEXT: v_mov_b32_e32 v2, s4
-; SI-NEXT: .LBB73_5: ; %end
+; SI-NEXT: .LBB73_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v0, v0, v2
@@ -9665,16 +9250,14 @@ define inreg <4 x i16> @bitcast_v2f32_to_v4i16_scalar(<2 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB73_3
+; VI-NEXT: s_cbranch_scc0 .LBB73_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB73_4
+; VI-NEXT: s_cbranch_execnz .LBB73_3
; VI-NEXT: .LBB73_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB73_3:
-; VI-NEXT: s_branch .LBB73_2
-; VI-NEXT: .LBB73_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -9683,16 +9266,14 @@ define inreg <4 x i16> @bitcast_v2f32_to_v4i16_scalar(<2 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB73_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB73_4
+; GFX9-NEXT: s_cbranch_execnz .LBB73_3
; GFX9-NEXT: .LBB73_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB73_3:
-; GFX9-NEXT: s_branch .LBB73_2
-; GFX9-NEXT: .LBB73_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -9702,17 +9283,15 @@ define inreg <4 x i16> @bitcast_v2f32_to_v4i16_scalar(<2 x float> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB73_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB73_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB73_3
; GFX11-NEXT: .LBB73_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB73_3:
-; GFX11-NEXT: s_branch .LBB73_2
-; GFX11-NEXT: .LBB73_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -9847,7 +9426,7 @@ define inreg <2 x float> @bitcast_v4i16_to_v2f32_scalar(<4 x i16> inreg %a, i32
; SI-NEXT: s_lshr_b32 s8, s17, 16
; SI-NEXT: s_lshr_b32 s9, s16, 16
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB75_4
+; SI-NEXT: s_cbranch_scc0 .LBB75_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s9, 16
@@ -9871,15 +9450,12 @@ define inreg <2 x float> @bitcast_v4i16_to_v2f32_scalar(<4 x i16> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB75_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB75_2
;
; VI-LABEL: bitcast_v4i16_to_v2f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB75_4
+; VI-NEXT: s_cbranch_scc0 .LBB75_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB75_3
; VI-NEXT: .LBB75_2: ; %cmp.true
@@ -9897,23 +9473,19 @@ define inreg <2 x float> @bitcast_v4i16_to_v2f32_scalar(<4 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB75_4:
-; VI-NEXT: s_branch .LBB75_2
;
; GFX9-LABEL: bitcast_v4i16_to_v2f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB75_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB75_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB75_4
+; GFX9-NEXT: s_cbranch_execnz .LBB75_3
; GFX9-NEXT: .LBB75_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB75_3:
-; GFX9-NEXT: s_branch .LBB75_2
-; GFX9-NEXT: .LBB75_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -9923,17 +9495,15 @@ define inreg <2 x float> @bitcast_v4i16_to_v2f32_scalar(<4 x i16> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB75_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB75_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB75_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB75_3
; GFX11-NEXT: .LBB75_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB75_3:
-; GFX11-NEXT: s_branch .LBB75_2
-; GFX11-NEXT: .LBB75_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -10045,27 +9615,23 @@ define inreg <4 x half> @bitcast_v2f32_to_v4f16_scalar(<2 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB77_3
+; SI-NEXT: s_cbranch_scc0 .LBB77_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s8, s17, 16
; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB77_4
+; SI-NEXT: s_cbranch_execnz .LBB77_3
; SI-NEXT: .LBB77_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: v_lshr_b64 v[2:3], v[0:1], 16
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1
-; SI-NEXT: s_branch .LBB77_5
+; SI-NEXT: s_branch .LBB77_4
; SI-NEXT: .LBB77_3:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB77_2
-; SI-NEXT: .LBB77_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v3, s8
; SI-NEXT: v_mov_b32_e32 v2, s4
-; SI-NEXT: .LBB77_5: ; %end
+; SI-NEXT: .LBB77_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v0, v0, v2
@@ -10078,16 +9644,14 @@ define inreg <4 x half> @bitcast_v2f32_to_v4f16_scalar(<2 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB77_3
+; VI-NEXT: s_cbranch_scc0 .LBB77_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB77_4
+; VI-NEXT: s_cbranch_execnz .LBB77_3
; VI-NEXT: .LBB77_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB77_3:
-; VI-NEXT: s_branch .LBB77_2
-; VI-NEXT: .LBB77_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -10096,16 +9660,14 @@ define inreg <4 x half> @bitcast_v2f32_to_v4f16_scalar(<2 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB77_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB77_4
+; GFX9-NEXT: s_cbranch_execnz .LBB77_3
; GFX9-NEXT: .LBB77_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB77_3:
-; GFX9-NEXT: s_branch .LBB77_2
-; GFX9-NEXT: .LBB77_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -10115,17 +9677,15 @@ define inreg <4 x half> @bitcast_v2f32_to_v4f16_scalar(<2 x float> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB77_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB77_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB77_3
; GFX11-NEXT: .LBB77_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB77_3:
-; GFX11-NEXT: s_branch .LBB77_2
-; GFX11-NEXT: .LBB77_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -10269,7 +9829,7 @@ define inreg <2 x float> @bitcast_v4f16_to_v2f32_scalar(<4 x half> inreg %a, i32
; SI-NEXT: s_lshr_b32 s8, s17, 16
; SI-NEXT: s_lshr_b32 s9, s16, 16
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB79_3
+; SI-NEXT: s_cbranch_scc0 .LBB79_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s9, 16
@@ -10277,7 +9837,7 @@ define inreg <2 x float> @bitcast_v4f16_to_v2f32_scalar(<4 x half> inreg %a, i32
; SI-NEXT: s_and_b32 s5, s17, 0xffff
; SI-NEXT: s_lshl_b32 s6, s8, 16
; SI-NEXT: s_or_b32 s5, s5, s6
-; SI-NEXT: s_cbranch_execnz .LBB79_4
+; SI-NEXT: s_cbranch_execnz .LBB79_3
; SI-NEXT: .LBB79_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s9
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -10297,9 +9857,6 @@ define inreg <2 x float> @bitcast_v4f16_to_v2f32_scalar(<4 x half> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB79_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB79_2
-; SI-NEXT: .LBB79_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -10308,9 +9865,9 @@ define inreg <2 x float> @bitcast_v4f16_to_v2f32_scalar(<4 x half> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB79_3
+; VI-NEXT: s_cbranch_scc0 .LBB79_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB79_4
+; VI-NEXT: s_cbranch_execnz .LBB79_3
; VI-NEXT: .LBB79_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -10325,8 +9882,6 @@ define inreg <2 x float> @bitcast_v4f16_to_v2f32_scalar(<4 x half> inreg %a, i32
; VI-NEXT: v_or_b32_e32 v0, v0, v2
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB79_3:
-; VI-NEXT: s_branch .LBB79_2
-; VI-NEXT: .LBB79_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -10335,17 +9890,15 @@ define inreg <2 x float> @bitcast_v4f16_to_v2f32_scalar(<4 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB79_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB79_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB79_4
+; GFX9-NEXT: s_cbranch_execnz .LBB79_3
; GFX9-NEXT: .LBB79_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB79_3:
-; GFX9-NEXT: s_branch .LBB79_2
-; GFX9-NEXT: .LBB79_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -10355,17 +9908,15 @@ define inreg <2 x float> @bitcast_v4f16_to_v2f32_scalar(<4 x half> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB79_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB79_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB79_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB79_3
; GFX11-NEXT: .LBB79_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB79_3:
-; GFX11-NEXT: s_branch .LBB79_2
-; GFX11-NEXT: .LBB79_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -10487,13 +10038,13 @@ define inreg <4 x bfloat> @bitcast_v2f32_to_v4bf16_scalar(<2 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB81_3
+; SI-NEXT: s_cbranch_scc0 .LBB81_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s17, 0xffff0000
; SI-NEXT: s_lshl_b32 s7, s17, 16
; SI-NEXT: s_and_b32 s8, s16, 0xffff0000
; SI-NEXT: s_lshl_b32 s9, s16, 16
-; SI-NEXT: s_cbranch_execnz .LBB81_4
+; SI-NEXT: s_cbranch_execnz .LBB81_3
; SI-NEXT: .LBB81_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
@@ -10501,19 +10052,13 @@ define inreg <4 x bfloat> @bitcast_v2f32_to_v4bf16_scalar(<2 x float> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v0
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: s_branch .LBB81_5
+; SI-NEXT: s_branch .LBB81_4
; SI-NEXT: .LBB81_3:
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB81_2
-; SI-NEXT: .LBB81_4:
; SI-NEXT: v_mov_b32_e32 v0, s9
; SI-NEXT: v_mov_b32_e32 v1, s8
; SI-NEXT: v_mov_b32_e32 v2, s7
; SI-NEXT: v_mov_b32_e32 v3, s6
-; SI-NEXT: .LBB81_5: ; %end
+; SI-NEXT: .LBB81_4: ; %end
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
@@ -10528,16 +10073,14 @@ define inreg <4 x bfloat> @bitcast_v2f32_to_v4bf16_scalar(<2 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB81_3
+; VI-NEXT: s_cbranch_scc0 .LBB81_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB81_4
+; VI-NEXT: s_cbranch_execnz .LBB81_3
; VI-NEXT: .LBB81_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB81_3:
-; VI-NEXT: s_branch .LBB81_2
-; VI-NEXT: .LBB81_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -10546,16 +10089,14 @@ define inreg <4 x bfloat> @bitcast_v2f32_to_v4bf16_scalar(<2 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB81_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB81_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB81_4
+; GFX9-NEXT: s_cbranch_execnz .LBB81_3
; GFX9-NEXT: .LBB81_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB81_3:
-; GFX9-NEXT: s_branch .LBB81_2
-; GFX9-NEXT: .LBB81_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -10565,17 +10106,15 @@ define inreg <4 x bfloat> @bitcast_v2f32_to_v4bf16_scalar(<2 x float> inreg %a,
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB81_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB81_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB81_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB81_3
; GFX11-NEXT: .LBB81_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB81_3:
-; GFX11-NEXT: s_branch .LBB81_2
-; GFX11-NEXT: .LBB81_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -10871,7 +10410,7 @@ define inreg <2 x float> @bitcast_v4bf16_to_v2f32_scalar(<4 x bfloat> inreg %a,
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s7
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB83_4
+; SI-NEXT: s_cbranch_scc0 .LBB83_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v8
; SI-NEXT: v_lshr_b64 v[0:1], v[5:6], 16
@@ -10893,17 +10432,14 @@ define inreg <2 x float> @bitcast_v4bf16_to_v2f32_scalar(<4 x bfloat> inreg %a,
; SI-NEXT: v_lshr_b64 v[1:2], v[1:2], 16
; SI-NEXT: .LBB83_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB83_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SI-NEXT: s_branch .LBB83_2
;
; VI-LABEL: bitcast_v4bf16_to_v2f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB83_3
+; VI-NEXT: s_cbranch_scc0 .LBB83_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB83_4
+; VI-NEXT: s_cbranch_execnz .LBB83_3
; VI-NEXT: .LBB83_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v4, 0x40c00000
@@ -10945,8 +10481,6 @@ define inreg <2 x float> @bitcast_v4bf16_to_v2f32_scalar(<4 x bfloat> inreg %a,
; VI-NEXT: v_mov_b32_e32 v1, v2
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB83_3:
-; VI-NEXT: s_branch .LBB83_2
-; VI-NEXT: .LBB83_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -10955,9 +10489,9 @@ define inreg <2 x float> @bitcast_v4bf16_to_v2f32_scalar(<4 x bfloat> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB83_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB83_4
+; GFX9-NEXT: s_cbranch_execnz .LBB83_3
; GFX9-NEXT: .LBB83_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, 0x40c00000
@@ -11001,8 +10535,6 @@ define inreg <2 x float> @bitcast_v4bf16_to_v2f32_scalar(<4 x bfloat> inreg %a,
; GFX9-NEXT: v_lshl_or_b32 v1, v2, 16, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB83_3:
-; GFX9-NEXT: s_branch .LBB83_2
-; GFX9-NEXT: .LBB83_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -11012,10 +10544,10 @@ define inreg <2 x float> @bitcast_v4bf16_to_v2f32_scalar(<4 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB83_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB83_3
; GFX11-TRUE16-NEXT: .LBB83_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -11059,8 +10591,6 @@ define inreg <2 x float> @bitcast_v4bf16_to_v2f32_scalar(<4 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB83_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB83_2
-; GFX11-TRUE16-NEXT: .LBB83_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -11069,10 +10599,10 @@ define inreg <2 x float> @bitcast_v4bf16_to_v2f32_scalar(<4 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB83_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB83_3
; GFX11-FAKE16-NEXT: .LBB83_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -11120,8 +10650,6 @@ define inreg <2 x float> @bitcast_v4bf16_to_v2f32_scalar(<4 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v2
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB83_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB83_2
-; GFX11-FAKE16-NEXT: .LBB83_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -11354,7 +10882,7 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB85_3
+; SI-NEXT: s_cbranch_scc0 .LBB85_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s12, s17, 24
; SI-NEXT: s_lshr_b32 s13, s17, 16
@@ -11362,7 +10890,7 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16
; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8
-; SI-NEXT: s_cbranch_execnz .LBB85_4
+; SI-NEXT: s_cbranch_execnz .LBB85_3
; SI-NEXT: .LBB85_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v11, s17, 1.0
; SI-NEXT: v_add_f32_e64 v10, s16, 1.0
@@ -11372,16 +10900,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v11
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v11
; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v11
-; SI-NEXT: s_branch .LBB85_5
+; SI-NEXT: s_branch .LBB85_4
; SI-NEXT: .LBB85_3:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: s_branch .LBB85_2
-; SI-NEXT: .LBB85_4:
; SI-NEXT: v_mov_b32_e32 v10, s16
; SI-NEXT: v_mov_b32_e32 v11, s17
; SI-NEXT: v_mov_b32_e32 v5, s14
@@ -11390,7 +10910,7 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v3, s4
; SI-NEXT: v_mov_b32_e32 v8, s6
; SI-NEXT: v_mov_b32_e32 v1, s8
-; SI-NEXT: .LBB85_5: ; %end
+; SI-NEXT: .LBB85_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, v10
; SI-NEXT: v_mov_b32_e32 v2, v8
; SI-NEXT: v_mov_b32_e32 v4, v11
@@ -11400,7 +10920,7 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB85_3
+; VI-NEXT: s_cbranch_scc0 .LBB85_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; VI-NEXT: s_lshr_b32 s5, s17, 24
@@ -11408,7 +10928,7 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; VI-NEXT: s_lshr_b32 s10, s17, 8
; VI-NEXT: s_lshr_b32 s9, s16, 16
; VI-NEXT: s_lshr_b32 s11, s16, 8
-; VI-NEXT: s_cbranch_execnz .LBB85_4
+; VI-NEXT: s_cbranch_execnz .LBB85_3
; VI-NEXT: .LBB85_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v9, s17, 1.0
; VI-NEXT: v_add_f32_e64 v8, s16, 1.0
@@ -11418,16 +10938,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; VI-NEXT: v_lshrrev_b32_e32 v5, 8, v9
; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v8
; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v8
-; VI-NEXT: s_branch .LBB85_5
+; VI-NEXT: s_branch .LBB85_4
; VI-NEXT: .LBB85_3:
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr9
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr5
-; VI-NEXT: s_branch .LBB85_2
-; VI-NEXT: .LBB85_4:
; VI-NEXT: v_mov_b32_e32 v8, s16
; VI-NEXT: v_mov_b32_e32 v9, s17
; VI-NEXT: v_mov_b32_e32 v1, s11
@@ -11436,7 +10948,7 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v6, s8
; VI-NEXT: v_mov_b32_e32 v7, s5
; VI-NEXT: v_mov_b32_e32 v3, s4
-; VI-NEXT: .LBB85_5: ; %end
+; VI-NEXT: .LBB85_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, v8
; VI-NEXT: v_mov_b32_e32 v4, v9
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -11445,7 +10957,7 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB85_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB85_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; GFX9-NEXT: s_lshr_b32 s5, s17, 24
@@ -11453,7 +10965,7 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX9-NEXT: s_lshr_b32 s10, s17, 8
; GFX9-NEXT: s_lshr_b32 s9, s16, 16
; GFX9-NEXT: s_lshr_b32 s11, s16, 8
-; GFX9-NEXT: s_cbranch_execnz .LBB85_4
+; GFX9-NEXT: s_cbranch_execnz .LBB85_3
; GFX9-NEXT: .LBB85_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v9, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v8, s16, 1.0
@@ -11463,16 +10975,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v8
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v8
-; GFX9-NEXT: s_branch .LBB85_5
+; GFX9-NEXT: s_branch .LBB85_4
; GFX9-NEXT: .LBB85_3:
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr9
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr5
-; GFX9-NEXT: s_branch .LBB85_2
-; GFX9-NEXT: .LBB85_4:
; GFX9-NEXT: v_mov_b32_e32 v8, s16
; GFX9-NEXT: v_mov_b32_e32 v9, s17
; GFX9-NEXT: v_mov_b32_e32 v1, s11
@@ -11481,7 +10985,7 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v6, s8
; GFX9-NEXT: v_mov_b32_e32 v7, s5
; GFX9-NEXT: v_mov_b32_e32 v3, s4
-; GFX9-NEXT: .LBB85_5: ; %end
+; GFX9-NEXT: .LBB85_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, v8
; GFX9-NEXT: v_mov_b32_e32 v4, v9
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -11491,7 +10995,7 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB85_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB85_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
; GFX11-NEXT: s_lshr_b32 s3, s1, 24
@@ -11500,7 +11004,7 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX11-NEXT: s_lshr_b32 s6, s0, 16
; GFX11-NEXT: s_lshr_b32 s8, s0, 8
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB85_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB85_3
; GFX11-NEXT: .LBB85_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v9, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v8, s0, 1.0
@@ -11511,22 +11015,14 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v9
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v8
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v8
-; GFX11-NEXT: s_branch .LBB85_5
+; GFX11-NEXT: s_branch .LBB85_4
; GFX11-NEXT: .LBB85_3:
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr2
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: s_branch .LBB85_2
-; GFX11-NEXT: .LBB85_4:
; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1
; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s6
; GFX11-NEXT: v_dual_mov_b32 v5, s7 :: v_dual_mov_b32 v6, s5
; GFX11-NEXT: v_mov_b32_e32 v7, s3
; GFX11-NEXT: v_mov_b32_e32 v3, s2
-; GFX11-NEXT: .LBB85_5: ; %end
+; GFX11-NEXT: .LBB85_4: ; %end
; GFX11-NEXT: v_mov_b32_e32 v0, v8
; GFX11-NEXT: v_mov_b32_e32 v4, v9
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -11885,7 +11381,7 @@ define inreg <2 x float> @bitcast_v8i8_to_v2f32_scalar(<8 x i8> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB87_4
+; SI-NEXT: s_cbranch_scc0 .LBB87_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -11937,15 +11433,12 @@ define inreg <2 x float> @bitcast_v8i8_to_v2f32_scalar(<8 x i8> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB87_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB87_2
;
; VI-LABEL: bitcast_v8i8_to_v2f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB87_4
+; VI-NEXT: s_cbranch_scc0 .LBB87_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v1, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v2, s19
@@ -11985,15 +11478,12 @@ define inreg <2 x float> @bitcast_v8i8_to_v2f32_scalar(<8 x i8> inreg %a, i32 in
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x3000000, v1
; VI-NEXT: .LBB87_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB87_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; VI-NEXT: s_branch .LBB87_2
;
; GFX9-LABEL: bitcast_v8i8_to_v2f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB87_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB87_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v1, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v2, s19
@@ -12032,16 +11522,13 @@ define inreg <2 x float> @bitcast_v8i8_to_v2f32_scalar(<8 x i8> inreg %a, i32 in
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB87_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB87_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX9-NEXT: s_branch .LBB87_2
;
; GFX11-LABEL: bitcast_v8i8_to_v2f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB87_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB87_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
@@ -12084,9 +11571,6 @@ define inreg <2 x float> @bitcast_v8i8_to_v2f32_scalar(<8 x i8> inreg %a, i32 in
; GFX11-NEXT: v_or_b32_e32 v1, v3, v4
; GFX11-NEXT: .LBB87_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB87_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX11-NEXT: s_branch .LBB87_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12223,7 +11707,7 @@ define inreg <4 x half> @bitcast_v4i16_to_v4f16_scalar(<4 x i16> inreg %a, i32 i
; SI-NEXT: s_lshr_b32 s10, s17, 16
; SI-NEXT: s_lshr_b32 s11, s16, 16
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB89_4
+; SI-NEXT: s_cbranch_scc0 .LBB89_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s5, s17, 0xffff
; SI-NEXT: s_lshl_b32 s6, s10, 16
@@ -12257,16 +11741,12 @@ define inreg <4 x half> @bitcast_v4i16_to_v4f16_scalar(<4 x i16> inreg %a, i32 i
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB89_4:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB89_2
;
; VI-LABEL: bitcast_v4i16_to_v4f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB89_4
+; VI-NEXT: s_cbranch_scc0 .LBB89_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB89_3
; VI-NEXT: .LBB89_2: ; %cmp.true
@@ -12284,23 +11764,19 @@ define inreg <4 x half> @bitcast_v4i16_to_v4f16_scalar(<4 x i16> inreg %a, i32 i
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB89_4:
-; VI-NEXT: s_branch .LBB89_2
;
; GFX9-LABEL: bitcast_v4i16_to_v4f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB89_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB89_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB89_4
+; GFX9-NEXT: s_cbranch_execnz .LBB89_3
; GFX9-NEXT: .LBB89_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB89_3:
-; GFX9-NEXT: s_branch .LBB89_2
-; GFX9-NEXT: .LBB89_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -12310,17 +11786,15 @@ define inreg <4 x half> @bitcast_v4i16_to_v4f16_scalar(<4 x i16> inreg %a, i32 i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB89_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB89_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB89_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB89_3
; GFX11-NEXT: .LBB89_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB89_3:
-; GFX11-NEXT: s_branch .LBB89_2
-; GFX11-NEXT: .LBB89_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -12451,9 +11925,9 @@ define inreg <4 x i16> @bitcast_v4f16_to_v4i16_scalar(<4 x half> inreg %a, i32 i
; SI-NEXT: s_lshr_b32 s6, s17, 16
; SI-NEXT: s_lshr_b32 s7, s16, 16
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB91_3
+; SI-NEXT: s_cbranch_scc0 .LBB91_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB91_4
+; SI-NEXT: s_cbranch_execnz .LBB91_3
; SI-NEXT: .LBB91_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v2, s6
; SI-NEXT: v_cvt_f32_f16_e32 v0, s7
@@ -12472,15 +11946,13 @@ define inreg <4 x i16> @bitcast_v4f16_to_v4i16_scalar(<4 x half> inreg %a, i32 i
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_lshr_b64 v[2:3], v[0:1], 16
; SI-NEXT: v_or_b32_e32 v0, v5, v0
-; SI-NEXT: s_branch .LBB91_5
+; SI-NEXT: s_branch .LBB91_4
; SI-NEXT: .LBB91_3:
-; SI-NEXT: s_branch .LBB91_2
-; SI-NEXT: .LBB91_4:
; SI-NEXT: v_mov_b32_e32 v4, s6
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v2, s7
-; SI-NEXT: .LBB91_5: ; %end
+; SI-NEXT: .LBB91_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v0, v0, v2
@@ -12493,9 +11965,9 @@ define inreg <4 x i16> @bitcast_v4f16_to_v4i16_scalar(<4 x half> inreg %a, i32 i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB91_3
+; VI-NEXT: s_cbranch_scc0 .LBB91_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB91_4
+; VI-NEXT: s_cbranch_execnz .LBB91_3
; VI-NEXT: .LBB91_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -12510,8 +11982,6 @@ define inreg <4 x i16> @bitcast_v4f16_to_v4i16_scalar(<4 x half> inreg %a, i32 i
; VI-NEXT: v_or_b32_e32 v0, v2, v3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB91_3:
-; VI-NEXT: s_branch .LBB91_2
-; VI-NEXT: .LBB91_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -12520,17 +11990,15 @@ define inreg <4 x i16> @bitcast_v4f16_to_v4i16_scalar(<4 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB91_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB91_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB91_4
+; GFX9-NEXT: s_cbranch_execnz .LBB91_3
; GFX9-NEXT: .LBB91_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB91_3:
-; GFX9-NEXT: s_branch .LBB91_2
-; GFX9-NEXT: .LBB91_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -12540,17 +12008,15 @@ define inreg <4 x i16> @bitcast_v4f16_to_v4i16_scalar(<4 x half> inreg %a, i32 i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB91_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB91_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB91_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB91_3
; GFX11-NEXT: .LBB91_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB91_3:
-; GFX11-NEXT: s_branch .LBB91_2
-; GFX11-NEXT: .LBB91_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -12687,7 +12153,7 @@ define inreg <4 x bfloat> @bitcast_v4i16_to_v4bf16_scalar(<4 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s11, s17, 16
; SI-NEXT: s_lshr_b32 s10, s16, 16
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB93_4
+; SI-NEXT: s_cbranch_scc0 .LBB93_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s7, s16, 16
; SI-NEXT: s_lshl_b32 s9, s10, 16
@@ -12719,18 +12185,12 @@ define inreg <4 x bfloat> @bitcast_v4i16_to_v4bf16_scalar(<4 x i16> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s6
; SI-NEXT: v_lshr_b64 v[1:2], v[1:2], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB93_4:
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB93_2
;
; VI-LABEL: bitcast_v4i16_to_v4bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB93_4
+; VI-NEXT: s_cbranch_scc0 .LBB93_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB93_3
; VI-NEXT: .LBB93_2: ; %cmp.true
@@ -12748,23 +12208,19 @@ define inreg <4 x bfloat> @bitcast_v4i16_to_v4bf16_scalar(<4 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB93_4:
-; VI-NEXT: s_branch .LBB93_2
;
; GFX9-LABEL: bitcast_v4i16_to_v4bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB93_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB93_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB93_4
+; GFX9-NEXT: s_cbranch_execnz .LBB93_3
; GFX9-NEXT: .LBB93_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB93_3:
-; GFX9-NEXT: s_branch .LBB93_2
-; GFX9-NEXT: .LBB93_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -12774,17 +12230,15 @@ define inreg <4 x bfloat> @bitcast_v4i16_to_v4bf16_scalar(<4 x i16> inreg %a, i3
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB93_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB93_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB93_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB93_3
; GFX11-NEXT: .LBB93_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB93_3:
-; GFX11-NEXT: s_branch .LBB93_2
-; GFX11-NEXT: .LBB93_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -13082,7 +12536,7 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s6
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s5
; SI-NEXT: v_mul_f32_e64 v0, 1.0, s4
-; SI-NEXT: s_cbranch_scc0 .LBB95_4
+; SI-NEXT: s_cbranch_scc0 .LBB95_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v7
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v4
@@ -13112,20 +12566,14 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v5
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB95_4:
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr6
-; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: s_branch .LBB95_2
;
; VI-LABEL: bitcast_v4bf16_to_v4i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB95_3
+; VI-NEXT: s_cbranch_scc0 .LBB95_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB95_4
+; VI-NEXT: s_cbranch_execnz .LBB95_3
; VI-NEXT: .LBB95_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v3, 0x40c00000
@@ -13167,8 +12615,6 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v1, v2
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB95_3:
-; VI-NEXT: s_branch .LBB95_2
-; VI-NEXT: .LBB95_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -13177,9 +12623,9 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB95_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB95_4
+; GFX9-NEXT: s_cbranch_execnz .LBB95_3
; GFX9-NEXT: .LBB95_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s17
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -13221,8 +12667,6 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3
; GFX9-NEXT: v_and_or_b32 v1, v1, v4, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB95_3:
-; GFX9-NEXT: s_branch .LBB95_2
-; GFX9-NEXT: .LBB95_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -13232,10 +12676,10 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB95_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB95_3
; GFX11-TRUE16-NEXT: .LBB95_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
@@ -13274,8 +12718,6 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.h
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB95_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB95_2
-; GFX11-TRUE16-NEXT: .LBB95_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -13284,10 +12726,10 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB95_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB95_3
; GFX11-FAKE16-NEXT: .LBB95_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
@@ -13329,8 +12771,6 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v2
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB95_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB95_2
-; GFX11-FAKE16-NEXT: .LBB95_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -13594,7 +13034,7 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; SI-NEXT: s_lshr_b32 s14, s17, 16
; SI-NEXT: s_lshr_b32 s15, s16, 16
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB97_4
+; SI-NEXT: s_cbranch_scc0 .LBB97_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s15, 16
@@ -13635,20 +13075,12 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; SI-NEXT: v_mov_b32_e32 v6, s14
; SI-NEXT: v_mov_b32_e32 v7, s7
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB97_4:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB97_2
;
; VI-LABEL: bitcast_v4i16_to_v8i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB97_4
+; VI-NEXT: s_cbranch_scc0 .LBB97_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; VI-NEXT: s_lshr_b32 s10, s17, 24
@@ -13685,21 +13117,12 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; VI-NEXT: v_mov_b32_e32 v6, s8
; VI-NEXT: v_mov_b32_e32 v7, s10
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB97_4:
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr9
-; VI-NEXT: ; implicit-def: $sgpr5
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB97_2
;
; GFX9-LABEL: bitcast_v4i16_to_v8i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB97_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB97_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; GFX9-NEXT: s_lshr_b32 s5, s17, 24
@@ -13707,7 +13130,7 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX9-NEXT: s_lshr_b32 s10, s17, 8
; GFX9-NEXT: s_lshr_b32 s9, s16, 16
; GFX9-NEXT: s_lshr_b32 s11, s16, 8
-; GFX9-NEXT: s_cbranch_execnz .LBB97_4
+; GFX9-NEXT: s_cbranch_execnz .LBB97_3
; GFX9-NEXT: .LBB97_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v9, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, s16, 3 op_sel_hi:[1,0]
@@ -13717,16 +13140,8 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v8
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v8
-; GFX9-NEXT: s_branch .LBB97_5
+; GFX9-NEXT: s_branch .LBB97_4
; GFX9-NEXT: .LBB97_3:
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr9
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr5
-; GFX9-NEXT: s_branch .LBB97_2
-; GFX9-NEXT: .LBB97_4:
; GFX9-NEXT: v_mov_b32_e32 v8, s16
; GFX9-NEXT: v_mov_b32_e32 v9, s17
; GFX9-NEXT: v_mov_b32_e32 v1, s11
@@ -13735,7 +13150,7 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX9-NEXT: v_mov_b32_e32 v6, s8
; GFX9-NEXT: v_mov_b32_e32 v7, s5
; GFX9-NEXT: v_mov_b32_e32 v3, s4
-; GFX9-NEXT: .LBB97_5: ; %end
+; GFX9-NEXT: .LBB97_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, v8
; GFX9-NEXT: v_mov_b32_e32 v4, v9
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -13745,7 +13160,7 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB97_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB97_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
; GFX11-NEXT: s_lshr_b32 s3, s1, 24
@@ -13754,7 +13169,7 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX11-NEXT: s_lshr_b32 s6, s0, 16
; GFX11-NEXT: s_lshr_b32 s8, s0, 8
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB97_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB97_3
; GFX11-NEXT: .LBB97_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v8, s0, 3 op_sel_hi:[1,0]
@@ -13765,22 +13180,14 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v9
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v8
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v8
-; GFX11-NEXT: s_branch .LBB97_5
+; GFX11-NEXT: s_branch .LBB97_4
; GFX11-NEXT: .LBB97_3:
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr2
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: s_branch .LBB97_2
-; GFX11-NEXT: .LBB97_4:
; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1
; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s6
; GFX11-NEXT: v_dual_mov_b32 v5, s7 :: v_dual_mov_b32 v6, s5
; GFX11-NEXT: v_mov_b32_e32 v7, s3
; GFX11-NEXT: v_mov_b32_e32 v3, s2
-; GFX11-NEXT: .LBB97_5: ; %end
+; GFX11-NEXT: .LBB97_4: ; %end
; GFX11-NEXT: v_mov_b32_e32 v0, v8
; GFX11-NEXT: v_mov_b32_e32 v4, v9
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -14147,7 +13554,7 @@ define inreg <4 x i16> @bitcast_v8i8_to_v4i16_scalar(<8 x i8> inreg %a, i32 inre
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB99_4
+; SI-NEXT: s_cbranch_scc0 .LBB99_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -14210,17 +13617,12 @@ define inreg <4 x i16> @bitcast_v8i8_to_v4i16_scalar(<8 x i8> inreg %a, i32 inre
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB99_4:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB99_2
;
; VI-LABEL: bitcast_v8i8_to_v4i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB99_4
+; VI-NEXT: s_cbranch_scc0 .LBB99_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v1, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v2, s19
@@ -14260,15 +13662,12 @@ define inreg <4 x i16> @bitcast_v8i8_to_v4i16_scalar(<8 x i8> inreg %a, i32 inre
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x3000000, v1
; VI-NEXT: .LBB99_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB99_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; VI-NEXT: s_branch .LBB99_2
;
; GFX9-LABEL: bitcast_v8i8_to_v4i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB99_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB99_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v1, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v2, s19
@@ -14307,16 +13706,13 @@ define inreg <4 x i16> @bitcast_v8i8_to_v4i16_scalar(<8 x i8> inreg %a, i32 inre
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB99_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB99_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX9-NEXT: s_branch .LBB99_2
;
; GFX11-LABEL: bitcast_v8i8_to_v4i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB99_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB99_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
@@ -14359,9 +13755,6 @@ define inreg <4 x i16> @bitcast_v8i8_to_v4i16_scalar(<8 x i8> inreg %a, i32 inre
; GFX11-NEXT: v_or_b32_e32 v1, v3, v4
; GFX11-NEXT: .LBB99_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB99_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX11-NEXT: s_branch .LBB99_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14505,13 +13898,13 @@ define inreg <4 x bfloat> @bitcast_v4f16_to_v4bf16_scalar(<4 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s7, s17, 16
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB101_3
+; SI-NEXT: s_cbranch_scc0 .LBB101_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s8, s16, 16
; SI-NEXT: s_lshl_b32 s9, s6, 16
; SI-NEXT: s_lshl_b32 s10, s17, 16
; SI-NEXT: s_lshl_b32 s11, s7, 16
-; SI-NEXT: s_cbranch_execnz .LBB101_4
+; SI-NEXT: s_cbranch_execnz .LBB101_3
; SI-NEXT: .LBB101_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s7
; SI-NEXT: v_cvt_f32_f16_e32 v1, s17
@@ -14529,19 +13922,13 @@ define inreg <4 x bfloat> @bitcast_v4f16_to_v4bf16_scalar(<4 x half> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v4
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v5
-; SI-NEXT: s_branch .LBB101_5
+; SI-NEXT: s_branch .LBB101_4
; SI-NEXT: .LBB101_3:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: s_branch .LBB101_2
-; SI-NEXT: .LBB101_4:
; SI-NEXT: v_mov_b32_e32 v3, s11
; SI-NEXT: v_mov_b32_e32 v2, s10
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: .LBB101_5: ; %end
+; SI-NEXT: .LBB101_4: ; %end
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
@@ -14556,9 +13943,9 @@ define inreg <4 x bfloat> @bitcast_v4f16_to_v4bf16_scalar(<4 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB101_3
+; VI-NEXT: s_cbranch_scc0 .LBB101_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB101_4
+; VI-NEXT: s_cbranch_execnz .LBB101_3
; VI-NEXT: .LBB101_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -14573,8 +13960,6 @@ define inreg <4 x bfloat> @bitcast_v4f16_to_v4bf16_scalar(<4 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v2, v3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB101_3:
-; VI-NEXT: s_branch .LBB101_2
-; VI-NEXT: .LBB101_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -14583,17 +13968,15 @@ define inreg <4 x bfloat> @bitcast_v4f16_to_v4bf16_scalar(<4 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB101_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB101_4
+; GFX9-NEXT: s_cbranch_execnz .LBB101_3
; GFX9-NEXT: .LBB101_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB101_3:
-; GFX9-NEXT: s_branch .LBB101_2
-; GFX9-NEXT: .LBB101_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -14603,17 +13986,15 @@ define inreg <4 x bfloat> @bitcast_v4f16_to_v4bf16_scalar(<4 x half> inreg %a, i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB101_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB101_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB101_3
; GFX11-NEXT: .LBB101_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB101_3:
-; GFX11-NEXT: s_branch .LBB101_2
-; GFX11-NEXT: .LBB101_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -14920,7 +14301,7 @@ define inreg <4 x half> @bitcast_v4bf16_to_v4f16_scalar(<4 x bfloat> inreg %a, i
; SI-NEXT: v_mul_f32_e64 v11, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s5
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s7
-; SI-NEXT: s_cbranch_scc0 .LBB103_4
+; SI-NEXT: s_cbranch_scc0 .LBB103_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v11
; SI-NEXT: v_lshr_b64 v[1:2], v[3:4], 16
@@ -14952,19 +14333,14 @@ define inreg <4 x half> @bitcast_v4bf16_to_v4f16_scalar(<4 x bfloat> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v4
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB103_4:
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB103_2
;
; VI-LABEL: bitcast_v4bf16_to_v4f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB103_3
+; VI-NEXT: s_cbranch_scc0 .LBB103_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB103_4
+; VI-NEXT: s_cbranch_execnz .LBB103_3
; VI-NEXT: .LBB103_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v3, 0x40c00000
@@ -15006,8 +14382,6 @@ define inreg <4 x half> @bitcast_v4bf16_to_v4f16_scalar(<4 x bfloat> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v1, v2
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB103_3:
-; VI-NEXT: s_branch .LBB103_2
-; VI-NEXT: .LBB103_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -15016,9 +14390,9 @@ define inreg <4 x half> @bitcast_v4bf16_to_v4f16_scalar(<4 x bfloat> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB103_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB103_4
+; GFX9-NEXT: s_cbranch_execnz .LBB103_3
; GFX9-NEXT: .LBB103_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s17
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -15062,8 +14436,6 @@ define inreg <4 x half> @bitcast_v4bf16_to_v4f16_scalar(<4 x bfloat> inreg %a, i
; GFX9-NEXT: v_lshl_or_b32 v1, v1, 16, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB103_3:
-; GFX9-NEXT: s_branch .LBB103_2
-; GFX9-NEXT: .LBB103_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -15073,10 +14445,10 @@ define inreg <4 x half> @bitcast_v4bf16_to_v4f16_scalar(<4 x bfloat> inreg %a, i
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB103_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB103_3
; GFX11-TRUE16-NEXT: .LBB103_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
@@ -15122,8 +14494,6 @@ define inreg <4 x half> @bitcast_v4bf16_to_v4f16_scalar(<4 x bfloat> inreg %a, i
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB103_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB103_2
-; GFX11-TRUE16-NEXT: .LBB103_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -15132,10 +14502,10 @@ define inreg <4 x half> @bitcast_v4bf16_to_v4f16_scalar(<4 x bfloat> inreg %a, i
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB103_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB103_3
; GFX11-FAKE16-NEXT: .LBB103_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
@@ -15182,8 +14552,6 @@ define inreg <4 x half> @bitcast_v4bf16_to_v4f16_scalar(<4 x bfloat> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v2
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB103_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB103_2
-; GFX11-FAKE16-NEXT: .LBB103_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -15451,7 +14819,7 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; SI-NEXT: s_lshr_b32 s14, s17, 16
; SI-NEXT: s_lshr_b32 s15, s16, 16
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB105_3
+; SI-NEXT: s_cbranch_scc0 .LBB105_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s15, 16
@@ -15464,7 +14832,7 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8
; SI-NEXT: s_lshr_b32 s7, s5, 8
; SI-NEXT: s_bfe_u32 s9, s14, 0x80008
-; SI-NEXT: s_cbranch_execnz .LBB105_4
+; SI-NEXT: s_cbranch_execnz .LBB105_3
; SI-NEXT: .LBB105_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s15
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -15487,16 +14855,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; SI-NEXT: v_lshr_b64 v[1:2], v[9:10], 8
; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v10
; SI-NEXT: v_bfe_u32 v7, v6, 8, 8
-; SI-NEXT: s_branch .LBB105_5
+; SI-NEXT: s_branch .LBB105_4
; SI-NEXT: .LBB105_3:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB105_2
-; SI-NEXT: .LBB105_4:
; SI-NEXT: v_mov_b32_e32 v6, s14
; SI-NEXT: v_mov_b32_e32 v7, s9
; SI-NEXT: v_mov_b32_e32 v10, s5
@@ -15505,7 +14865,7 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; SI-NEXT: v_mov_b32_e32 v3, s6
; SI-NEXT: v_mov_b32_e32 v4, s8
; SI-NEXT: v_mov_b32_e32 v1, s10
-; SI-NEXT: .LBB105_5: ; %end
+; SI-NEXT: .LBB105_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, v9
; SI-NEXT: v_mov_b32_e32 v2, v4
; SI-NEXT: v_mov_b32_e32 v4, v10
@@ -15515,7 +14875,7 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB105_3
+; VI-NEXT: s_cbranch_scc0 .LBB105_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; VI-NEXT: s_lshr_b32 s8, s17, 24
@@ -15523,7 +14883,7 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; VI-NEXT: s_lshr_b32 s5, s17, 8
; VI-NEXT: s_lshr_b32 s11, s16, 16
; VI-NEXT: s_lshr_b32 s9, s16, 8
-; VI-NEXT: s_cbranch_execnz .LBB105_4
+; VI-NEXT: s_cbranch_execnz .LBB105_3
; VI-NEXT: .LBB105_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -15543,14 +14903,6 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; VI-NEXT: v_mov_b32_e32 v4, v8
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB105_3:
-; VI-NEXT: ; implicit-def: $sgpr9
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr5
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: s_branch .LBB105_2
-; VI-NEXT: .LBB105_4:
; VI-NEXT: v_mov_b32_e32 v2, s11
; VI-NEXT: v_mov_b32_e32 v6, s10
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -15565,7 +14917,7 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB105_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB105_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; GFX9-NEXT: s_lshr_b32 s5, s17, 24
@@ -15573,7 +14925,7 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX9-NEXT: s_lshr_b32 s10, s17, 8
; GFX9-NEXT: s_lshr_b32 s9, s16, 16
; GFX9-NEXT: s_lshr_b32 s11, s16, 8
-; GFX9-NEXT: s_cbranch_execnz .LBB105_4
+; GFX9-NEXT: s_cbranch_execnz .LBB105_3
; GFX9-NEXT: .LBB105_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v9, s17, v0 op_sel_hi:[1,0]
@@ -15584,16 +14936,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v8
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v8
-; GFX9-NEXT: s_branch .LBB105_5
+; GFX9-NEXT: s_branch .LBB105_4
; GFX9-NEXT: .LBB105_3:
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr9
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr5
-; GFX9-NEXT: s_branch .LBB105_2
-; GFX9-NEXT: .LBB105_4:
; GFX9-NEXT: v_mov_b32_e32 v8, s16
; GFX9-NEXT: v_mov_b32_e32 v9, s17
; GFX9-NEXT: v_mov_b32_e32 v1, s11
@@ -15602,7 +14946,7 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX9-NEXT: v_mov_b32_e32 v6, s8
; GFX9-NEXT: v_mov_b32_e32 v7, s5
; GFX9-NEXT: v_mov_b32_e32 v3, s4
-; GFX9-NEXT: .LBB105_5: ; %end
+; GFX9-NEXT: .LBB105_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, v8
; GFX9-NEXT: v_mov_b32_e32 v4, v9
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -15612,7 +14956,7 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB105_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB105_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
; GFX11-NEXT: s_lshr_b32 s3, s1, 24
@@ -15621,7 +14965,7 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX11-NEXT: s_lshr_b32 s6, s0, 16
; GFX11-NEXT: s_lshr_b32 s8, s0, 8
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB105_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB105_3
; GFX11-NEXT: .LBB105_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s0 op_sel_hi:[0,1]
@@ -15632,22 +14976,14 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v9
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v8
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v8
-; GFX11-NEXT: s_branch .LBB105_5
+; GFX11-NEXT: s_branch .LBB105_4
; GFX11-NEXT: .LBB105_3:
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr2
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: s_branch .LBB105_2
-; GFX11-NEXT: .LBB105_4:
; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1
; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s6
; GFX11-NEXT: v_dual_mov_b32 v5, s7 :: v_dual_mov_b32 v6, s5
; GFX11-NEXT: v_mov_b32_e32 v7, s3
; GFX11-NEXT: v_mov_b32_e32 v3, s2
-; GFX11-NEXT: .LBB105_5: ; %end
+; GFX11-NEXT: .LBB105_4: ; %end
; GFX11-NEXT: v_mov_b32_e32 v0, v8
; GFX11-NEXT: v_mov_b32_e32 v4, v9
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -16014,7 +15350,7 @@ define inreg <4 x half> @bitcast_v8i8_to_v4f16_scalar(<8 x i8> inreg %a, i32 inr
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB107_4
+; SI-NEXT: s_cbranch_scc0 .LBB107_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -16077,17 +15413,12 @@ define inreg <4 x half> @bitcast_v8i8_to_v4f16_scalar(<8 x i8> inreg %a, i32 inr
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB107_4:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB107_2
;
; VI-LABEL: bitcast_v8i8_to_v4f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB107_4
+; VI-NEXT: s_cbranch_scc0 .LBB107_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v1, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v2, s19
@@ -16127,15 +15458,12 @@ define inreg <4 x half> @bitcast_v8i8_to_v4f16_scalar(<8 x i8> inreg %a, i32 inr
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x3000000, v1
; VI-NEXT: .LBB107_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB107_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; VI-NEXT: s_branch .LBB107_2
;
; GFX9-LABEL: bitcast_v8i8_to_v4f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB107_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB107_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v1, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v2, s19
@@ -16174,16 +15502,13 @@ define inreg <4 x half> @bitcast_v8i8_to_v4f16_scalar(<8 x i8> inreg %a, i32 inr
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB107_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB107_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX9-NEXT: s_branch .LBB107_2
;
; GFX11-LABEL: bitcast_v8i8_to_v4f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB107_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB107_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
@@ -16226,9 +15551,6 @@ define inreg <4 x half> @bitcast_v8i8_to_v4f16_scalar(<8 x i8> inreg %a, i32 inr
; GFX11-NEXT: v_or_b32_e32 v1, v3, v4
; GFX11-NEXT: .LBB107_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB107_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX11-NEXT: s_branch .LBB107_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16639,7 +15961,7 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; SI-NEXT: v_mul_f32_e64 v0, 1.0, s7
; SI-NEXT: v_mul_f32_e64 v14, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB109_4
+; SI-NEXT: s_cbranch_scc0 .LBB109_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v15
; SI-NEXT: v_lshr_b64 v[11:12], v[0:1], 16
@@ -16675,20 +15997,12 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v4, v12
; SI-NEXT: v_mov_b32_e32 v5, v8
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB109_4:
-; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr8
-; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: s_branch .LBB109_2
;
; VI-LABEL: bitcast_v4bf16_to_v8i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB109_3
+; VI-NEXT: s_cbranch_scc0 .LBB109_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; VI-NEXT: s_lshr_b32 s5, s17, 24
@@ -16696,7 +16010,7 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; VI-NEXT: s_lshr_b32 s8, s17, 8
; VI-NEXT: s_lshr_b32 s10, s16, 16
; VI-NEXT: s_lshr_b32 s9, s16, 8
-; VI-NEXT: s_cbranch_execnz .LBB109_4
+; VI-NEXT: s_cbranch_execnz .LBB109_3
; VI-NEXT: .LBB109_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -16746,14 +16060,6 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, v8
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB109_3:
-; VI-NEXT: ; implicit-def: $sgpr9
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr5
-; VI-NEXT: s_branch .LBB109_2
-; VI-NEXT: .LBB109_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v6, s11
; VI-NEXT: v_mov_b32_e32 v2, s10
@@ -16768,7 +16074,7 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB109_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; GFX9-NEXT: s_lshr_b32 s9, s17, 24
@@ -16776,7 +16082,7 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; GFX9-NEXT: s_lshr_b32 s10, s17, 8
; GFX9-NEXT: s_lshr_b32 s8, s16, 16
; GFX9-NEXT: s_lshr_b32 s5, s16, 8
-; GFX9-NEXT: s_cbranch_execnz .LBB109_4
+; GFX9-NEXT: s_cbranch_execnz .LBB109_3
; GFX9-NEXT: .LBB109_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, 0x40c00000
@@ -16827,14 +16133,6 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, v8
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB109_3:
-; GFX9-NEXT: ; implicit-def: $sgpr5
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr9
-; GFX9-NEXT: s_branch .LBB109_2
-; GFX9-NEXT: .LBB109_4:
; GFX9-NEXT: v_mov_b32_e32 v6, s11
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v7, s9
@@ -16850,7 +16148,7 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB109_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s1, 24
@@ -16859,7 +16157,7 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s0, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s3, s0, 8
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB109_3
; GFX11-TRUE16-NEXT: .LBB109_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -16915,14 +16213,6 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v8
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB109_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr3
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr5
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr2
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
-; GFX11-TRUE16-NEXT: s_branch .LBB109_2
-; GFX11-TRUE16-NEXT: .LBB109_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s6
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v5, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mov_b32 v1, s3
@@ -16934,7 +16224,7 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB109_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s1, 24
@@ -16943,7 +16233,7 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s3, s0, 8
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB109_3
; GFX11-FAKE16-NEXT: .LBB109_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -16998,14 +16288,6 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v8
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB109_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr3
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr5
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr2
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
-; GFX11-FAKE16-NEXT: s_branch .LBB109_2
-; GFX11-FAKE16-NEXT: .LBB109_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s6
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v5, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mov_b32 v1, s3
@@ -17375,7 +16657,7 @@ define inreg <4 x bfloat> @bitcast_v8i8_to_v4bf16_scalar(<8 x i8> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB111_4
+; SI-NEXT: s_cbranch_scc0 .LBB111_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s4, s4, 16
@@ -17435,18 +16717,12 @@ define inreg <4 x bfloat> @bitcast_v8i8_to_v4bf16_scalar(<8 x i8> inreg %a, i32
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s7
; SI-NEXT: v_lshr_b64 v[1:2], v[1:2], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB111_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB111_2
;
; VI-LABEL: bitcast_v8i8_to_v4bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB111_4
+; VI-NEXT: s_cbranch_scc0 .LBB111_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v1, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v2, s19
@@ -17486,15 +16762,12 @@ define inreg <4 x bfloat> @bitcast_v8i8_to_v4bf16_scalar(<8 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x3000000, v1
; VI-NEXT: .LBB111_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB111_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; VI-NEXT: s_branch .LBB111_2
;
; GFX9-LABEL: bitcast_v8i8_to_v4bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB111_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB111_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v1, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v2, s19
@@ -17533,16 +16806,13 @@ define inreg <4 x bfloat> @bitcast_v8i8_to_v4bf16_scalar(<8 x i8> inreg %a, i32
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB111_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB111_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX9-NEXT: s_branch .LBB111_2
;
; GFX11-LABEL: bitcast_v8i8_to_v4bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB111_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB111_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
@@ -17585,9 +16855,6 @@ define inreg <4 x bfloat> @bitcast_v8i8_to_v4bf16_scalar(<8 x i8> inreg %a, i32
; GFX11-NEXT: v_or_b32_e32 v1, v3, v4
; GFX11-NEXT: .LBB111_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB111_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX11-NEXT: s_branch .LBB111_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
index 2254fcdd9c7ad..87962196497b7 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
@@ -178,7 +178,7 @@ define inreg <22 x float> @bitcast_v22i32_to_v22f32_scalar(<22 x i32> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s12, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s13, v0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -228,8 +228,6 @@ define inreg <22 x float> @bitcast_v22i32_to_v22f32_scalar(<22 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v20, s7
; SI-NEXT: v_mov_b32_e32 v21, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v22i32_to_v22f32_scalar:
; VI: ; %bb.0:
@@ -244,7 +242,7 @@ define inreg <22 x float> @bitcast_v22i32_to_v22f32_scalar(<22 x i32> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s12, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s13, v0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -294,8 +292,6 @@ define inreg <22 x float> @bitcast_v22i32_to_v22f32_scalar(<22 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v20, s7
; VI-NEXT: v_mov_b32_e32 v21, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v22i32_to_v22f32_scalar:
; GFX9: ; %bb.0:
@@ -310,7 +306,7 @@ define inreg <22 x float> @bitcast_v22i32_to_v22f32_scalar(<22 x i32> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s12, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s13, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -360,8 +356,6 @@ define inreg <22 x float> @bitcast_v22i32_to_v22f32_scalar(<22 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v20, s7
; GFX9-NEXT: v_mov_b32_e32 v21, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v22i32_to_v22f32_scalar:
; GFX11: ; %bb.0:
@@ -373,7 +367,7 @@ define inreg <22 x float> @bitcast_v22i32_to_v22f32_scalar(<22 x i32> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s7, v0
; GFX11-NEXT: s_cmp_lg_u32 s8, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -414,8 +408,6 @@ define inreg <22 x float> @bitcast_v22i32_to_v22f32_scalar(<22 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v18, s7 :: v_dual_mov_b32 v19, s6
; GFX11-NEXT: v_dual_mov_b32 v20, s5 :: v_dual_mov_b32 v21, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -624,9 +616,9 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v21, s57, 1.0
; SI-NEXT: v_add_f32_e64 v20, s56, 1.0
@@ -650,10 +642,8 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB3_5
+; SI-NEXT: s_branch .LBB3_4
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -686,7 +676,7 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB3_5: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -747,9 +737,9 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v21, s57, 1.0
; VI-NEXT: v_add_f32_e64 v20, s56, 1.0
@@ -773,10 +763,8 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB3_5
+; VI-NEXT: s_branch .LBB3_4
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -809,7 +797,7 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB3_5: ; %end
+; VI-NEXT: .LBB3_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -870,9 +858,9 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v21, s57, 1.0
; GFX9-NEXT: v_add_f32_e64 v20, s56, 1.0
@@ -896,10 +884,8 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB3_5
+; GFX9-NEXT: s_branch .LBB3_4
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -932,7 +918,7 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB3_5: ; %end
+; GFX9-NEXT: .LBB3_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -994,10 +980,10 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v21, s57, 1.0
; GFX11-NEXT: v_add_f32_e64 v20, s56, 1.0
@@ -1021,10 +1007,8 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB3_5
+; GFX11-NEXT: s_branch .LBB3_4
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -1041,7 +1025,7 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB3_5: ; %end
+; GFX11-NEXT: .LBB3_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -1248,7 +1232,7 @@ define inreg <11 x i64> @bitcast_v22i32_to_v11i64_scalar(<22 x i32> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s12, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s13, v0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB5_3
; SI-NEXT: .LBB5_2: ; %cmp.true
@@ -1298,8 +1282,6 @@ define inreg <11 x i64> @bitcast_v22i32_to_v11i64_scalar(<22 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v20, s7
; SI-NEXT: v_mov_b32_e32 v21, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v22i32_to_v11i64_scalar:
; VI: ; %bb.0:
@@ -1314,7 +1296,7 @@ define inreg <11 x i64> @bitcast_v22i32_to_v11i64_scalar(<22 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s12, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s13, v0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -1364,8 +1346,6 @@ define inreg <11 x i64> @bitcast_v22i32_to_v11i64_scalar(<22 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v20, s7
; VI-NEXT: v_mov_b32_e32 v21, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v22i32_to_v11i64_scalar:
; GFX9: ; %bb.0:
@@ -1380,7 +1360,7 @@ define inreg <11 x i64> @bitcast_v22i32_to_v11i64_scalar(<22 x i32> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s12, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s13, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -1430,8 +1410,6 @@ define inreg <11 x i64> @bitcast_v22i32_to_v11i64_scalar(<22 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v20, s7
; GFX9-NEXT: v_mov_b32_e32 v21, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v22i32_to_v11i64_scalar:
; GFX11: ; %bb.0:
@@ -1443,7 +1421,7 @@ define inreg <11 x i64> @bitcast_v22i32_to_v11i64_scalar(<22 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s7, v0
; GFX11-NEXT: s_cmp_lg_u32 s8, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -1484,8 +1462,6 @@ define inreg <11 x i64> @bitcast_v22i32_to_v11i64_scalar(<22 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v18, s7 :: v_dual_mov_b32 v19, s6
; GFX11-NEXT: v_dual_mov_b32 v20, s5 :: v_dual_mov_b32 v21, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1681,7 +1657,7 @@ define inreg <22 x i32> @bitcast_v11i64_to_v22i32_scalar(<11 x i64> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s12, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s13, v0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB7_3
; SI-NEXT: .LBB7_2: ; %cmp.true
@@ -1731,8 +1707,6 @@ define inreg <22 x i32> @bitcast_v11i64_to_v22i32_scalar(<11 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v20, s7
; SI-NEXT: v_mov_b32_e32 v21, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v11i64_to_v22i32_scalar:
; VI: ; %bb.0:
@@ -1747,7 +1721,7 @@ define inreg <22 x i32> @bitcast_v11i64_to_v22i32_scalar(<11 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s12, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s13, v0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -1797,8 +1771,6 @@ define inreg <22 x i32> @bitcast_v11i64_to_v22i32_scalar(<11 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v20, s7
; VI-NEXT: v_mov_b32_e32 v21, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v11i64_to_v22i32_scalar:
; GFX9: ; %bb.0:
@@ -1813,7 +1785,7 @@ define inreg <22 x i32> @bitcast_v11i64_to_v22i32_scalar(<11 x i64> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s12, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s13, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
@@ -1863,8 +1835,6 @@ define inreg <22 x i32> @bitcast_v11i64_to_v22i32_scalar(<11 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v20, s7
; GFX9-NEXT: v_mov_b32_e32 v21, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v11i64_to_v22i32_scalar:
; GFX11: ; %bb.0:
@@ -1876,7 +1846,7 @@ define inreg <22 x i32> @bitcast_v11i64_to_v22i32_scalar(<11 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s7, v0
; GFX11-NEXT: s_cmp_lg_u32 s8, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
@@ -1917,8 +1887,6 @@ define inreg <22 x i32> @bitcast_v11i64_to_v22i32_scalar(<11 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v18, s7 :: v_dual_mov_b32 v19, s6
; GFX11-NEXT: v_dual_mov_b32 v20, s5 :: v_dual_mov_b32 v21, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2108,7 +2076,7 @@ define inreg <11 x double> @bitcast_v22i32_to_v11f64_scalar(<22 x i32> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s12, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s13, v0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB9_3
; SI-NEXT: .LBB9_2: ; %cmp.true
@@ -2158,8 +2126,6 @@ define inreg <11 x double> @bitcast_v22i32_to_v11f64_scalar(<22 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v20, s7
; SI-NEXT: v_mov_b32_e32 v21, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v22i32_to_v11f64_scalar:
; VI: ; %bb.0:
@@ -2174,7 +2140,7 @@ define inreg <11 x double> @bitcast_v22i32_to_v11f64_scalar(<22 x i32> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s12, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s13, v0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -2224,8 +2190,6 @@ define inreg <11 x double> @bitcast_v22i32_to_v11f64_scalar(<22 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v20, s7
; VI-NEXT: v_mov_b32_e32 v21, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v22i32_to_v11f64_scalar:
; GFX9: ; %bb.0:
@@ -2240,7 +2204,7 @@ define inreg <11 x double> @bitcast_v22i32_to_v11f64_scalar(<22 x i32> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s12, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s13, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -2290,8 +2254,6 @@ define inreg <11 x double> @bitcast_v22i32_to_v11f64_scalar(<22 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v20, s7
; GFX9-NEXT: v_mov_b32_e32 v21, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v22i32_to_v11f64_scalar:
; GFX11: ; %bb.0:
@@ -2303,7 +2265,7 @@ define inreg <11 x double> @bitcast_v22i32_to_v11f64_scalar(<22 x i32> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s7, v0
; GFX11-NEXT: s_cmp_lg_u32 s8, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -2344,8 +2306,6 @@ define inreg <11 x double> @bitcast_v22i32_to_v11f64_scalar(<22 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v18, s7 :: v_dual_mov_b32 v19, s6
; GFX11-NEXT: v_dual_mov_b32 v20, s5 :: v_dual_mov_b32 v21, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2521,9 +2481,9 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
; SI-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
@@ -2536,10 +2496,8 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; SI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; SI-NEXT: s_branch .LBB11_5
+; SI-NEXT: s_branch .LBB11_4
; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -2572,7 +2530,7 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB11_5: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -2633,9 +2591,9 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
; VI-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
@@ -2648,10 +2606,8 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB11_5
+; VI-NEXT: s_branch .LBB11_4
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -2684,7 +2640,7 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB11_5: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -2745,9 +2701,9 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
; GFX9-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
@@ -2760,10 +2716,8 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB11_5
+; GFX9-NEXT: s_branch .LBB11_4
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -2796,7 +2750,7 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB11_5: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -2858,10 +2812,10 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
; GFX11-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
@@ -2874,10 +2828,8 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB11_5
+; GFX11-NEXT: s_branch .LBB11_4
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -2894,7 +2846,7 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB11_5: ; %end
+; GFX11-NEXT: .LBB11_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -3569,7 +3521,7 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s11, v1
; SI-NEXT: s_cmp_lg_u32 s10, 0
; SI-NEXT: v_readfirstlane_b32 s10, v0
-; SI-NEXT: s_cbranch_scc0 .LBB13_4
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s76, s5, 16
; SI-NEXT: s_lshr_b32 s77, s7, 16
@@ -3729,30 +3681,6 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v20, s4
; SI-NEXT: v_mov_b32_e32 v21, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB13_4:
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: s_branch .LBB13_2
;
; VI-LABEL: bitcast_v22i32_to_v44i16_scalar:
; VI: ; %bb.0:
@@ -3767,7 +3695,7 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s12, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s13, v0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s6, 16
; VI-NEXT: s_lshr_b32 s15, s7, 16
@@ -3927,30 +3855,6 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v20, s7
; VI-NEXT: v_mov_b32_e32 v21, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v22i32_to_v44i16_scalar:
; GFX9: ; %bb.0:
@@ -3965,7 +3869,7 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s12, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s13, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s14, s6, 16
; GFX9-NEXT: s_lshr_b32 s15, s7, 16
@@ -4081,30 +3985,6 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v20, s7
; GFX9-NEXT: v_mov_b32_e32 v21, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v22i32_to_v44i16_scalar:
; GFX11: ; %bb.0:
@@ -4116,7 +3996,7 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s7, v0
; GFX11-NEXT: s_cmp_lg_u32 s8, 0
; GFX11-NEXT: s_mov_b32 s62, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s8, s4, 16
; GFX11-NEXT: s_lshr_b32 s9, s5, 16
@@ -4223,30 +4103,6 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v18, s7 :: v_dual_mov_b32 v19, s6
; GFX11-NEXT: v_dual_mov_b32 v20, s5 :: v_dual_mov_b32 v21, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5272,7 +5128,7 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; SI-NEXT: v_writelane_b32 v22, s66, 18
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v22, s67, 19
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s35, 16
@@ -5500,9 +5356,6 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v44i16_to_v22i32_scalar:
; VI: ; %bb.0:
@@ -5566,7 +5419,7 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s89, s90, 16
; VI-NEXT: v_readfirstlane_b32 s4, v8
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s71, 16
@@ -5798,9 +5651,6 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v44i16_to_v22i32_scalar:
; GFX9: ; %bb.0:
@@ -5874,9 +5724,9 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s55, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s56, s56, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s57, s57, s58
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -5900,10 +5750,8 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v19, s55, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v20, s56, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v21, s57, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB15_5
+; GFX9-NEXT: s_branch .LBB15_4
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -5936,7 +5784,7 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB15_5: ; %end
+; GFX9-NEXT: .LBB15_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -6009,10 +5857,10 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s19, s57, s62
; GFX11-NEXT: s_pack_ll_b32_b16 s20, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s21, s46, s59
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -6038,8 +5886,6 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v21, s21, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -6714,7 +6560,7 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s11, v1
; SI-NEXT: s_cmp_lg_u32 s10, 0
; SI-NEXT: v_readfirstlane_b32 s10, v0
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s76, s5, 16
; SI-NEXT: s_lshr_b32 s77, s7, 16
@@ -6874,30 +6720,6 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v20, s4
; SI-NEXT: v_mov_b32_e32 v21, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_v22i32_to_v44f16_scalar:
; VI: ; %bb.0:
@@ -6912,7 +6734,7 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s12, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s13, v0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s6, 16
; VI-NEXT: s_lshr_b32 s15, s7, 16
@@ -7072,30 +6894,6 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v20, s7
; VI-NEXT: v_mov_b32_e32 v21, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v22i32_to_v44f16_scalar:
; GFX9: ; %bb.0:
@@ -7110,7 +6908,7 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s12, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s13, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s14, s6, 16
; GFX9-NEXT: s_lshr_b32 s15, s7, 16
@@ -7226,30 +7024,6 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v20, s7
; GFX9-NEXT: v_mov_b32_e32 v21, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v22i32_to_v44f16_scalar:
; GFX11: ; %bb.0:
@@ -7261,7 +7035,7 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s7, v0
; GFX11-NEXT: s_cmp_lg_u32 s8, 0
; GFX11-NEXT: s_mov_b32 s62, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s8, s4, 16
; GFX11-NEXT: s_lshr_b32 s9, s5, 16
@@ -7368,30 +7142,6 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v18, s7 :: v_dual_mov_b32 v19, s6
; GFX11-NEXT: v_dual_mov_b32 v20, s5 :: v_dual_mov_b32 v21, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8516,7 +8266,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; SI-NEXT: v_writelane_b32 v32, s66, 18
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v32, s67, 19
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s35, 16
@@ -8584,7 +8334,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s57, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s35
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -8762,11 +8512,8 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v20, v21, v20
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v22
; SI-NEXT: v_or_b32_e32 v21, v23, v21
-; SI-NEXT: s_branch .LBB19_5
+; SI-NEXT: s_branch .LBB19_4
; SI-NEXT: .LBB19_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -8799,7 +8546,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB19_5: ; %end
+; SI-NEXT: .LBB19_4: ; %end
; SI-NEXT: v_readlane_b32 s67, v32, 19
; SI-NEXT: v_readlane_b32 s66, v32, 18
; SI-NEXT: v_readlane_b32 s65, v32, 17
@@ -8888,7 +8635,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s91, s89, 16
; VI-NEXT: v_readfirstlane_b32 s4, v8
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s71, 16
@@ -8956,7 +8703,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s57, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB19_4
+; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v21, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s71
@@ -9047,11 +8794,8 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; VI-NEXT: v_add_f16_sdwa v22, v22, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v21, s6, v21
; VI-NEXT: v_or_b32_e32 v21, v21, v22
-; VI-NEXT: s_branch .LBB19_5
+; VI-NEXT: s_branch .LBB19_4
; VI-NEXT: .LBB19_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB19_2
-; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -9084,7 +8828,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB19_5: ; %end
+; VI-NEXT: .LBB19_4: ; %end
; VI-NEXT: v_readlane_b32 s71, v32, 23
; VI-NEXT: v_readlane_b32 s70, v32, 22
; VI-NEXT: v_readlane_b32 s69, v32, 21
@@ -9187,9 +8931,9 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s55, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s56, s56, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s57, s57, s58
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v21, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v21 op_sel_hi:[1,0]
@@ -9214,10 +8958,8 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v19, s55, v21 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v20, s56, v21 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v21, s57, v21 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB19_5
+; GFX9-NEXT: s_branch .LBB19_4
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -9250,7 +8992,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB19_5: ; %end
+; GFX9-NEXT: .LBB19_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -9323,10 +9065,10 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s19, s57, s62
; GFX11-NEXT: s_pack_ll_b32_b16 s20, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s21, s46, s59
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -9352,8 +9094,6 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v21, 0x200, s21 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -9579,9 +9319,9 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB21_3
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_4
+; SI-NEXT: s_cbranch_execnz .LBB21_3
; SI-NEXT: .LBB21_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v21, s57, 1.0
; SI-NEXT: v_add_f32_e64 v20, s56, 1.0
@@ -9605,10 +9345,8 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB21_5
+; SI-NEXT: s_branch .LBB21_4
; SI-NEXT: .LBB21_3:
-; SI-NEXT: s_branch .LBB21_2
-; SI-NEXT: .LBB21_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -9641,7 +9379,7 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB21_5: ; %end
+; SI-NEXT: .LBB21_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -9702,9 +9440,9 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB21_3
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_4
+; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v21, s57, 1.0
; VI-NEXT: v_add_f32_e64 v20, s56, 1.0
@@ -9728,10 +9466,8 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB21_5
+; VI-NEXT: s_branch .LBB21_4
; VI-NEXT: .LBB21_3:
-; VI-NEXT: s_branch .LBB21_2
-; VI-NEXT: .LBB21_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -9764,7 +9500,7 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB21_5: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -9825,9 +9561,9 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
+; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v21, s57, 1.0
; GFX9-NEXT: v_add_f32_e64 v20, s56, 1.0
@@ -9851,10 +9587,8 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB21_5
+; GFX9-NEXT: s_branch .LBB21_4
; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
-; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -9887,7 +9621,7 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB21_5: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -9949,10 +9683,10 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
; GFX11-NEXT: .LBB21_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v21, s57, 1.0
; GFX11-NEXT: v_add_f32_e64 v20, s56, 1.0
@@ -9976,10 +9710,8 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB21_5
+; GFX11-NEXT: s_branch .LBB21_4
; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
-; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -9996,7 +9728,7 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB21_5: ; %end
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -10209,7 +9941,7 @@ define inreg <22 x float> @bitcast_v11i64_to_v22f32_scalar(<11 x i64> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s12, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s13, v0
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB23_3
; SI-NEXT: .LBB23_2: ; %cmp.true
@@ -10259,8 +9991,6 @@ define inreg <22 x float> @bitcast_v11i64_to_v22f32_scalar(<11 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v20, s7
; SI-NEXT: v_mov_b32_e32 v21, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v11i64_to_v22f32_scalar:
; VI: ; %bb.0:
@@ -10275,7 +10005,7 @@ define inreg <22 x float> @bitcast_v11i64_to_v22f32_scalar(<11 x i64> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s12, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s13, v0
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
@@ -10325,8 +10055,6 @@ define inreg <22 x float> @bitcast_v11i64_to_v22f32_scalar(<11 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v20, s7
; VI-NEXT: v_mov_b32_e32 v21, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v11i64_to_v22f32_scalar:
; GFX9: ; %bb.0:
@@ -10341,7 +10069,7 @@ define inreg <22 x float> @bitcast_v11i64_to_v22f32_scalar(<11 x i64> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s12, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s13, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
@@ -10391,8 +10119,6 @@ define inreg <22 x float> @bitcast_v11i64_to_v22f32_scalar(<11 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v20, s7
; GFX9-NEXT: v_mov_b32_e32 v21, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v11i64_to_v22f32_scalar:
; GFX11: ; %bb.0:
@@ -10404,7 +10130,7 @@ define inreg <22 x float> @bitcast_v11i64_to_v22f32_scalar(<11 x i64> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s7, v0
; GFX11-NEXT: s_cmp_lg_u32 s8, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
@@ -10445,8 +10171,6 @@ define inreg <22 x float> @bitcast_v11i64_to_v22f32_scalar(<11 x i64> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v18, s7 :: v_dual_mov_b32 v19, s6
; GFX11-NEXT: v_dual_mov_b32 v20, s5 :: v_dual_mov_b32 v21, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: s_branch .LBB23_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10655,9 +10379,9 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB25_3
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_4
+; SI-NEXT: s_cbranch_execnz .LBB25_3
; SI-NEXT: .LBB25_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v21, s57, 1.0
; SI-NEXT: v_add_f32_e64 v20, s56, 1.0
@@ -10681,10 +10405,8 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB25_5
+; SI-NEXT: s_branch .LBB25_4
; SI-NEXT: .LBB25_3:
-; SI-NEXT: s_branch .LBB25_2
-; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -10717,7 +10439,7 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB25_5: ; %end
+; SI-NEXT: .LBB25_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -10778,9 +10500,9 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
+; VI-NEXT: s_cbranch_execnz .LBB25_3
; VI-NEXT: .LBB25_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v21, s57, 1.0
; VI-NEXT: v_add_f32_e64 v20, s56, 1.0
@@ -10804,10 +10526,8 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB25_5
+; VI-NEXT: s_branch .LBB25_4
; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
-; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -10840,7 +10560,7 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB25_5: ; %end
+; VI-NEXT: .LBB25_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -10901,9 +10621,9 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
+; GFX9-NEXT: s_cbranch_execnz .LBB25_3
; GFX9-NEXT: .LBB25_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v21, s57, 1.0
; GFX9-NEXT: v_add_f32_e64 v20, s56, 1.0
@@ -10927,10 +10647,8 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB25_5
+; GFX9-NEXT: s_branch .LBB25_4
; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
-; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -10963,7 +10681,7 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB25_5: ; %end
+; GFX9-NEXT: .LBB25_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -11025,10 +10743,10 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_3
; GFX11-NEXT: .LBB25_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v21, s57, 1.0
; GFX11-NEXT: v_add_f32_e64 v20, s56, 1.0
@@ -11052,10 +10770,8 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB25_5
+; GFX11-NEXT: s_branch .LBB25_4
; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
-; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -11072,7 +10788,7 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB25_5: ; %end
+; GFX11-NEXT: .LBB25_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -11265,9 +10981,9 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB27_3
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_4
+; SI-NEXT: s_cbranch_execnz .LBB27_3
; SI-NEXT: .LBB27_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
; SI-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
@@ -11280,10 +10996,8 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; SI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; SI-NEXT: s_branch .LBB27_5
+; SI-NEXT: s_branch .LBB27_4
; SI-NEXT: .LBB27_3:
-; SI-NEXT: s_branch .LBB27_2
-; SI-NEXT: .LBB27_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -11316,7 +11030,7 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB27_5: ; %end
+; SI-NEXT: .LBB27_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -11377,9 +11091,9 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB27_3
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_4
+; VI-NEXT: s_cbranch_execnz .LBB27_3
; VI-NEXT: .LBB27_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
; VI-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
@@ -11392,10 +11106,8 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; VI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB27_5
+; VI-NEXT: s_branch .LBB27_4
; VI-NEXT: .LBB27_3:
-; VI-NEXT: s_branch .LBB27_2
-; VI-NEXT: .LBB27_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -11428,7 +11140,7 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB27_5: ; %end
+; VI-NEXT: .LBB27_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -11489,9 +11201,9 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_4
+; GFX9-NEXT: s_cbranch_execnz .LBB27_3
; GFX9-NEXT: .LBB27_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
; GFX9-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
@@ -11504,10 +11216,8 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; GFX9-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB27_5
+; GFX9-NEXT: s_branch .LBB27_4
; GFX9-NEXT: .LBB27_3:
-; GFX9-NEXT: s_branch .LBB27_2
-; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -11540,7 +11250,7 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB27_5: ; %end
+; GFX9-NEXT: .LBB27_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -11602,10 +11312,10 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_3
; GFX11-NEXT: .LBB27_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
; GFX11-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
@@ -11618,10 +11328,8 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; GFX11-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB27_5
+; GFX11-NEXT: s_branch .LBB27_4
; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
-; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -11638,7 +11346,7 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB27_5: ; %end
+; GFX11-NEXT: .LBB27_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -12291,7 +11999,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s11, v1
; SI-NEXT: s_cmp_lg_u32 s10, 0
; SI-NEXT: v_readfirstlane_b32 s10, v0
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s94, s5, 16
; SI-NEXT: s_lshr_b32 s93, s7, 16
@@ -12315,7 +12023,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[60:61], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[62:63], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[72:73], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB29_4
+; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v21, s5, 1.0
; SI-NEXT: v_add_f32_e64 v20, s4, 1.0
@@ -12361,32 +12069,8 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v1
-; SI-NEXT: s_branch .LBB29_5
+; SI-NEXT: s_branch .LBB29_4
; SI-NEXT: .LBB29_3:
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: s_branch .LBB29_2
-; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -12431,7 +12115,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v30, s60
; SI-NEXT: v_mov_b32_e32 v31, s62
; SI-NEXT: v_mov_b32_e32 v32, s72
-; SI-NEXT: .LBB29_5: ; %end
+; SI-NEXT: .LBB29_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31
@@ -12513,7 +12197,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s12, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s13, v0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s6, 16
; VI-NEXT: s_lshr_b32 s15, s7, 16
@@ -12537,7 +12221,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; VI-NEXT: s_lshr_b32 s73, s18, 16
; VI-NEXT: s_lshr_b32 s74, s17, 16
; VI-NEXT: s_lshr_b32 s75, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB29_4
+; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v21, s6, 1.0
; VI-NEXT: v_add_f32_e64 v20, s7, 1.0
@@ -12583,32 +12267,8 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v49, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v50, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; VI-NEXT: s_branch .LBB29_5
+; VI-NEXT: s_branch .LBB29_4
; VI-NEXT: .LBB29_3:
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB29_2
-; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -12653,7 +12313,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v24, s40
; VI-NEXT: v_mov_b32_e32 v23, s15
; VI-NEXT: v_mov_b32_e32 v22, s14
-; VI-NEXT: .LBB29_5: ; %end
+; VI-NEXT: .LBB29_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v51, 16, v51
; VI-NEXT: v_lshlrev_b32_e32 v50, 16, v50
; VI-NEXT: v_lshlrev_b32_e32 v49, 16, v49
@@ -12713,7 +12373,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s12, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s13, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s14, s6, 16
; GFX9-NEXT: s_lshr_b32 s15, s7, 16
@@ -12737,7 +12397,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: s_lshr_b32 s73, s18, 16
; GFX9-NEXT: s_lshr_b32 s74, s17, 16
; GFX9-NEXT: s_lshr_b32 s75, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
+; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v21, s6, 1.0
; GFX9-NEXT: v_add_f32_e64 v20, s7, 1.0
@@ -12783,32 +12443,8 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX9-NEXT: s_branch .LBB29_5
+; GFX9-NEXT: s_branch .LBB29_4
; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB29_2
-; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -12853,7 +12489,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v24, s40
; GFX9-NEXT: v_mov_b32_e32 v23, s15
; GFX9-NEXT: v_mov_b32_e32 v22, s14
-; GFX9-NEXT: .LBB29_5: ; %end
+; GFX9-NEXT: .LBB29_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -12910,7 +12546,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s8, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s4, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s5, 16
@@ -12935,7 +12571,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s61, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s62, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f32_e64 v21, s4, 1.0
; GFX11-TRUE16-NEXT: v_add_f32_e64 v20, s5, 1.0
@@ -12981,32 +12617,8 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB29_5
+; GFX11-TRUE16-NEXT: s_branch .LBB29_4
; GFX11-TRUE16-NEXT: .LBB29_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-TRUE16-NEXT: s_branch .LBB29_2
-; GFX11-TRUE16-NEXT: .LBB29_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -13029,7 +12641,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
-; GFX11-TRUE16-NEXT: .LBB29_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB29_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
@@ -13076,7 +12688,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s8, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s4, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s5, 16
@@ -13101,7 +12713,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s61, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s62, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, s4, 1.0
; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, s5, 1.0
@@ -13147,32 +12759,8 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v1
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v6
-; GFX11-FAKE16-NEXT: s_branch .LBB29_5
+; GFX11-FAKE16-NEXT: s_branch .LBB29_4
; GFX11-FAKE16-NEXT: .LBB29_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-FAKE16-NEXT: s_branch .LBB29_2
-; GFX11-FAKE16-NEXT: .LBB29_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s0 :: v_dual_mov_b32 v5, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v0, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v3, s17
@@ -13195,7 +12783,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
-; GFX11-FAKE16-NEXT: .LBB29_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB29_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX11-FAKE16-NEXT: v_and_b32_e32 v52, 0xffff, v1
@@ -14266,7 +13854,7 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; SI-NEXT: v_writelane_b32 v22, s66, 18
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v22, s67, 19
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s35, 16
@@ -14494,9 +14082,6 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v44i16_to_v22f32_scalar:
; VI: ; %bb.0:
@@ -14560,7 +14145,7 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s89, s90, 16
; VI-NEXT: v_readfirstlane_b32 s4, v8
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s71, 16
@@ -14792,9 +14377,6 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v44i16_to_v22f32_scalar:
; GFX9: ; %bb.0:
@@ -14868,9 +14450,9 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s55, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s56, s56, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s57, s57, s58
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
+; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -14894,10 +14476,8 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v19, s55, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v20, s56, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v21, s57, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB31_5
+; GFX9-NEXT: s_branch .LBB31_4
; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
-; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -14930,7 +14510,7 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB31_5: ; %end
+; GFX9-NEXT: .LBB31_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -15003,10 +14583,10 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s19, s57, s62
; GFX11-NEXT: s_pack_ll_b32_b16 s20, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s21, s46, s59
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-NEXT: .LBB31_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -15032,8 +14612,6 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v21, s21, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
-; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -15686,7 +15264,7 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s11, v1
; SI-NEXT: s_cmp_lg_u32 s10, 0
; SI-NEXT: v_readfirstlane_b32 s10, v0
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s94, s5, 16
; SI-NEXT: s_lshr_b32 s93, s7, 16
@@ -15710,7 +15288,7 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[60:61], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[62:63], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[72:73], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB33_4
+; SI-NEXT: s_cbranch_execnz .LBB33_3
; SI-NEXT: .LBB33_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v21, s5, 1.0
; SI-NEXT: v_add_f32_e64 v20, s4, 1.0
@@ -15756,32 +15334,8 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v1
-; SI-NEXT: s_branch .LBB33_5
+; SI-NEXT: s_branch .LBB33_4
; SI-NEXT: .LBB33_3:
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: s_branch .LBB33_2
-; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -15826,7 +15380,7 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v30, s60
; SI-NEXT: v_mov_b32_e32 v31, s62
; SI-NEXT: v_mov_b32_e32 v32, s72
-; SI-NEXT: .LBB33_5: ; %end
+; SI-NEXT: .LBB33_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31
@@ -15908,7 +15462,7 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s12, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s13, v0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s6, 16
; VI-NEXT: s_lshr_b32 s15, s7, 16
@@ -15932,7 +15486,7 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; VI-NEXT: s_lshr_b32 s73, s18, 16
; VI-NEXT: s_lshr_b32 s74, s17, 16
; VI-NEXT: s_lshr_b32 s75, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB33_4
+; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v21, s6, 1.0
; VI-NEXT: v_add_f32_e64 v20, s7, 1.0
@@ -15978,32 +15532,8 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v49, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v50, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; VI-NEXT: s_branch .LBB33_5
+; VI-NEXT: s_branch .LBB33_4
; VI-NEXT: .LBB33_3:
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB33_2
-; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -16048,7 +15578,7 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v24, s40
; VI-NEXT: v_mov_b32_e32 v23, s15
; VI-NEXT: v_mov_b32_e32 v22, s14
-; VI-NEXT: .LBB33_5: ; %end
+; VI-NEXT: .LBB33_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v51, 16, v51
; VI-NEXT: v_lshlrev_b32_e32 v50, 16, v50
; VI-NEXT: v_lshlrev_b32_e32 v49, 16, v49
@@ -16108,7 +15638,7 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s12, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s13, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s14, s6, 16
; GFX9-NEXT: s_lshr_b32 s15, s7, 16
@@ -16132,7 +15662,7 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: s_lshr_b32 s73, s18, 16
; GFX9-NEXT: s_lshr_b32 s74, s17, 16
; GFX9-NEXT: s_lshr_b32 s75, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
+; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v21, s6, 1.0
; GFX9-NEXT: v_add_f32_e64 v20, s7, 1.0
@@ -16178,32 +15708,8 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX9-NEXT: s_branch .LBB33_5
+; GFX9-NEXT: s_branch .LBB33_4
; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB33_2
-; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -16248,7 +15754,7 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v24, s40
; GFX9-NEXT: v_mov_b32_e32 v23, s15
; GFX9-NEXT: v_mov_b32_e32 v22, s14
-; GFX9-NEXT: .LBB33_5: ; %end
+; GFX9-NEXT: .LBB33_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -16305,7 +15811,7 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s8, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s4, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s5, 16
@@ -16330,7 +15836,7 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s61, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s62, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f32_e64 v21, s4, 1.0
; GFX11-TRUE16-NEXT: v_add_f32_e64 v20, s5, 1.0
@@ -16376,32 +15882,8 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB33_5
+; GFX11-TRUE16-NEXT: s_branch .LBB33_4
; GFX11-TRUE16-NEXT: .LBB33_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-TRUE16-NEXT: s_branch .LBB33_2
-; GFX11-TRUE16-NEXT: .LBB33_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -16424,7 +15906,7 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
-; GFX11-TRUE16-NEXT: .LBB33_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB33_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
@@ -16471,7 +15953,7 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s8, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s4, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s5, 16
@@ -16496,7 +15978,7 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s61, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s62, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, s4, 1.0
; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, s5, 1.0
@@ -16542,32 +16024,8 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v1
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v6
-; GFX11-FAKE16-NEXT: s_branch .LBB33_5
+; GFX11-FAKE16-NEXT: s_branch .LBB33_4
; GFX11-FAKE16-NEXT: .LBB33_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-FAKE16-NEXT: s_branch .LBB33_2
-; GFX11-FAKE16-NEXT: .LBB33_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s0 :: v_dual_mov_b32 v5, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v0, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v3, s17
@@ -16590,7 +16048,7 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
-; GFX11-FAKE16-NEXT: .LBB33_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB33_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX11-FAKE16-NEXT: v_and_b32_e32 v52, 0xffff, v1
@@ -17760,7 +17218,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; SI-NEXT: v_writelane_b32 v32, s66, 18
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v32, s67, 19
-; SI-NEXT: s_cbranch_scc0 .LBB35_3
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s35, 16
@@ -17828,7 +17286,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s57, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB35_4
+; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s35
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -18006,11 +17464,8 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v20, v21, v20
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v22
; SI-NEXT: v_or_b32_e32 v21, v23, v21
-; SI-NEXT: s_branch .LBB35_5
+; SI-NEXT: s_branch .LBB35_4
; SI-NEXT: .LBB35_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB35_2
-; SI-NEXT: .LBB35_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -18043,7 +17498,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB35_5: ; %end
+; SI-NEXT: .LBB35_4: ; %end
; SI-NEXT: v_readlane_b32 s67, v32, 19
; SI-NEXT: v_readlane_b32 s66, v32, 18
; SI-NEXT: v_readlane_b32 s65, v32, 17
@@ -18132,7 +17587,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; VI-NEXT: s_lshr_b32 s91, s89, 16
; VI-NEXT: v_readfirstlane_b32 s4, v8
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s71, 16
@@ -18200,7 +17655,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s57, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB35_4
+; VI-NEXT: s_cbranch_execnz .LBB35_3
; VI-NEXT: .LBB35_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v21, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s71
@@ -18291,11 +17746,8 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; VI-NEXT: v_add_f16_sdwa v22, v22, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v21, s6, v21
; VI-NEXT: v_or_b32_e32 v21, v21, v22
-; VI-NEXT: s_branch .LBB35_5
+; VI-NEXT: s_branch .LBB35_4
; VI-NEXT: .LBB35_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB35_2
-; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -18328,7 +17780,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB35_5: ; %end
+; VI-NEXT: .LBB35_4: ; %end
; VI-NEXT: v_readlane_b32 s71, v32, 23
; VI-NEXT: v_readlane_b32 s70, v32, 22
; VI-NEXT: v_readlane_b32 s69, v32, 21
@@ -18431,9 +17883,9 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s55, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s56, s56, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s57, s57, s58
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
+; GFX9-NEXT: s_cbranch_execnz .LBB35_3
; GFX9-NEXT: .LBB35_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v21, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v21 op_sel_hi:[1,0]
@@ -18458,10 +17910,8 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v19, s55, v21 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v20, s56, v21 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v21, s57, v21 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB35_5
+; GFX9-NEXT: s_branch .LBB35_4
; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
-; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -18494,7 +17944,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB35_5: ; %end
+; GFX9-NEXT: .LBB35_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -18567,10 +18017,10 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s19, s57, s62
; GFX11-NEXT: s_pack_ll_b32_b16 s20, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s21, s46, s59
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-NEXT: .LBB35_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -18596,8 +18046,6 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v21, 0x200, s21 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
-; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -18810,7 +18258,7 @@ define inreg <11 x double> @bitcast_v11i64_to_v11f64_scalar(<11 x i64> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s12, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s13, v0
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB37_3
; SI-NEXT: .LBB37_2: ; %cmp.true
@@ -18860,8 +18308,6 @@ define inreg <11 x double> @bitcast_v11i64_to_v11f64_scalar(<11 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v20, s7
; SI-NEXT: v_mov_b32_e32 v21, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v11i64_to_v11f64_scalar:
; VI: ; %bb.0:
@@ -18876,7 +18322,7 @@ define inreg <11 x double> @bitcast_v11i64_to_v11f64_scalar(<11 x i64> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s12, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s13, v0
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB37_3
; VI-NEXT: .LBB37_2: ; %cmp.true
@@ -18926,8 +18372,6 @@ define inreg <11 x double> @bitcast_v11i64_to_v11f64_scalar(<11 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v20, s7
; VI-NEXT: v_mov_b32_e32 v21, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v11i64_to_v11f64_scalar:
; GFX9: ; %bb.0:
@@ -18942,7 +18386,7 @@ define inreg <11 x double> @bitcast_v11i64_to_v11f64_scalar(<11 x i64> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s12, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s13, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB37_3
; GFX9-NEXT: .LBB37_2: ; %cmp.true
@@ -18992,8 +18436,6 @@ define inreg <11 x double> @bitcast_v11i64_to_v11f64_scalar(<11 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v20, s7
; GFX9-NEXT: v_mov_b32_e32 v21, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v11i64_to_v11f64_scalar:
; GFX11: ; %bb.0:
@@ -19005,7 +18447,7 @@ define inreg <11 x double> @bitcast_v11i64_to_v11f64_scalar(<11 x i64> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s7, v0
; GFX11-NEXT: s_cmp_lg_u32 s8, 0
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
@@ -19045,8 +18487,6 @@ define inreg <11 x double> @bitcast_v11i64_to_v11f64_scalar(<11 x i64> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v18, s7 :: v_dual_mov_b32 v19, s6
; GFX11-NEXT: v_dual_mov_b32 v20, s5 :: v_dual_mov_b32 v21, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_4:
-; GFX11-NEXT: s_branch .LBB37_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -19222,9 +18662,9 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB39_3
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_4
+; SI-NEXT: s_cbranch_execnz .LBB39_3
; SI-NEXT: .LBB39_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -19237,10 +18677,8 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; SI-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; SI-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
; SI-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
-; SI-NEXT: s_branch .LBB39_5
+; SI-NEXT: s_branch .LBB39_4
; SI-NEXT: .LBB39_3:
-; SI-NEXT: s_branch .LBB39_2
-; SI-NEXT: .LBB39_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -19273,7 +18711,7 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB39_5: ; %end
+; SI-NEXT: .LBB39_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -19334,9 +18772,9 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB39_3
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_4
+; VI-NEXT: s_cbranch_execnz .LBB39_3
; VI-NEXT: .LBB39_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -19349,10 +18787,8 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; VI-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; VI-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
; VI-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
-; VI-NEXT: s_branch .LBB39_5
+; VI-NEXT: s_branch .LBB39_4
; VI-NEXT: .LBB39_3:
-; VI-NEXT: s_branch .LBB39_2
-; VI-NEXT: .LBB39_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -19385,7 +18821,7 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB39_5: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -19446,9 +18882,9 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
+; GFX9-NEXT: s_cbranch_execnz .LBB39_3
; GFX9-NEXT: .LBB39_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -19461,10 +18897,8 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; GFX9-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
; GFX9-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
-; GFX9-NEXT: s_branch .LBB39_5
+; GFX9-NEXT: s_branch .LBB39_4
; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
-; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -19497,7 +18931,7 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB39_5: ; %end
+; GFX9-NEXT: .LBB39_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -19559,10 +18993,10 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_3
; GFX11-NEXT: .LBB39_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -19575,10 +19009,8 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; GFX11-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
; GFX11-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
-; GFX11-NEXT: s_branch .LBB39_5
+; GFX11-NEXT: s_branch .LBB39_4
; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
-; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -19595,7 +19027,7 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB39_5: ; %end
+; GFX11-NEXT: .LBB39_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -20282,7 +19714,7 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s11, v1
; SI-NEXT: s_cmp_lg_u32 s10, 0
; SI-NEXT: v_readfirstlane_b32 s10, v0
-; SI-NEXT: s_cbranch_scc0 .LBB41_4
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s76, s5, 16
; SI-NEXT: s_lshr_b32 s77, s7, 16
@@ -20442,30 +19874,6 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v20, s4
; SI-NEXT: v_mov_b32_e32 v21, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB41_4:
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: s_branch .LBB41_2
;
; VI-LABEL: bitcast_v11i64_to_v44i16_scalar:
; VI: ; %bb.0:
@@ -20480,7 +19888,7 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s12, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s13, v0
-; VI-NEXT: s_cbranch_scc0 .LBB41_4
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s6, 16
; VI-NEXT: s_lshr_b32 s15, s7, 16
@@ -20640,30 +20048,6 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v20, s7
; VI-NEXT: v_mov_b32_e32 v21, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_4:
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB41_2
;
; GFX9-LABEL: bitcast_v11i64_to_v44i16_scalar:
; GFX9: ; %bb.0:
@@ -20678,7 +20062,7 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s12, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s13, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s14, s6, 16
; GFX9-NEXT: s_lshr_b32 s15, s7, 16
@@ -20794,30 +20178,6 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v20, s7
; GFX9-NEXT: v_mov_b32_e32 v21, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_4:
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB41_2
;
; GFX11-LABEL: bitcast_v11i64_to_v44i16_scalar:
; GFX11: ; %bb.0:
@@ -20829,7 +20189,7 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s7, v0
; GFX11-NEXT: s_cmp_lg_u32 s8, 0
; GFX11-NEXT: s_mov_b32 s62, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s8, s4, 16
; GFX11-NEXT: s_lshr_b32 s9, s5, 16
@@ -20936,30 +20296,6 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v18, s7 :: v_dual_mov_b32 v19, s6
; GFX11-NEXT: v_dual_mov_b32 v20, s5 :: v_dual_mov_b32 v21, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_4:
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: s_branch .LBB41_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -21985,7 +21321,7 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; SI-NEXT: v_writelane_b32 v22, s66, 18
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v22, s67, 19
-; SI-NEXT: s_cbranch_scc0 .LBB43_4
+; SI-NEXT: s_cbranch_scc0 .LBB43_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s35, 16
@@ -22213,9 +21549,6 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB43_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB43_2
;
; VI-LABEL: bitcast_v44i16_to_v11i64_scalar:
; VI: ; %bb.0:
@@ -22279,7 +21612,7 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s89, s90, 16
; VI-NEXT: v_readfirstlane_b32 s4, v8
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB43_4
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s71, 16
@@ -22511,9 +21844,6 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB43_2
;
; GFX9-LABEL: bitcast_v44i16_to_v11i64_scalar:
; GFX9: ; %bb.0:
@@ -22587,9 +21917,9 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s55, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s56, s56, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s57, s57, s58
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
+; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -22613,10 +21943,8 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v19, s55, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v20, s56, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v21, s57, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB43_5
+; GFX9-NEXT: s_branch .LBB43_4
; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
-; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -22649,7 +21977,7 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB43_5: ; %end
+; GFX9-NEXT: .LBB43_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -22722,10 +22050,10 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s19, s57, s62
; GFX11-NEXT: s_pack_ll_b32_b16 s20, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s21, s46, s59
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-NEXT: .LBB43_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -22751,8 +22079,6 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v21, s21, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
-; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -23439,7 +22765,7 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s11, v1
; SI-NEXT: s_cmp_lg_u32 s10, 0
; SI-NEXT: v_readfirstlane_b32 s10, v0
-; SI-NEXT: s_cbranch_scc0 .LBB45_4
+; SI-NEXT: s_cbranch_scc0 .LBB45_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s76, s5, 16
; SI-NEXT: s_lshr_b32 s77, s7, 16
@@ -23599,30 +22925,6 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v20, s4
; SI-NEXT: v_mov_b32_e32 v21, s5
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB45_4:
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: s_branch .LBB45_2
;
; VI-LABEL: bitcast_v11i64_to_v44f16_scalar:
; VI: ; %bb.0:
@@ -23637,7 +22939,7 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s12, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s13, v0
-; VI-NEXT: s_cbranch_scc0 .LBB45_4
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s6, 16
; VI-NEXT: s_lshr_b32 s15, s7, 16
@@ -23797,30 +23099,6 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v20, s7
; VI-NEXT: v_mov_b32_e32 v21, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_4:
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB45_2
;
; GFX9-LABEL: bitcast_v11i64_to_v44f16_scalar:
; GFX9: ; %bb.0:
@@ -23835,7 +23113,7 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s12, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s13, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s14, s6, 16
; GFX9-NEXT: s_lshr_b32 s15, s7, 16
@@ -23951,30 +23229,6 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v20, s7
; GFX9-NEXT: v_mov_b32_e32 v21, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_4:
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB45_2
;
; GFX11-LABEL: bitcast_v11i64_to_v44f16_scalar:
; GFX11: ; %bb.0:
@@ -23986,7 +23240,7 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s7, v0
; GFX11-NEXT: s_cmp_lg_u32 s8, 0
; GFX11-NEXT: s_mov_b32 s62, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s8, s4, 16
; GFX11-NEXT: s_lshr_b32 s9, s5, 16
@@ -24093,30 +23347,6 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v18, s7 :: v_dual_mov_b32 v19, s6
; GFX11-NEXT: v_dual_mov_b32 v20, s5 :: v_dual_mov_b32 v21, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_4:
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: s_branch .LBB45_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -25241,7 +24471,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; SI-NEXT: v_writelane_b32 v32, s66, 18
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v32, s67, 19
-; SI-NEXT: s_cbranch_scc0 .LBB47_3
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s35, 16
@@ -25309,7 +24539,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s57, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB47_4
+; SI-NEXT: s_cbranch_execnz .LBB47_3
; SI-NEXT: .LBB47_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s35
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -25487,11 +24717,8 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v20, v21, v20
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v22
; SI-NEXT: v_or_b32_e32 v21, v23, v21
-; SI-NEXT: s_branch .LBB47_5
+; SI-NEXT: s_branch .LBB47_4
; SI-NEXT: .LBB47_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB47_2
-; SI-NEXT: .LBB47_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -25524,7 +24751,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB47_5: ; %end
+; SI-NEXT: .LBB47_4: ; %end
; SI-NEXT: v_readlane_b32 s67, v32, 19
; SI-NEXT: v_readlane_b32 s66, v32, 18
; SI-NEXT: v_readlane_b32 s65, v32, 17
@@ -25613,7 +24840,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s91, s89, 16
; VI-NEXT: v_readfirstlane_b32 s4, v8
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s71, 16
@@ -25681,7 +24908,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s57, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB47_4
+; VI-NEXT: s_cbranch_execnz .LBB47_3
; VI-NEXT: .LBB47_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v21, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s71
@@ -25772,11 +24999,8 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; VI-NEXT: v_add_f16_sdwa v22, v22, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v21, s6, v21
; VI-NEXT: v_or_b32_e32 v21, v21, v22
-; VI-NEXT: s_branch .LBB47_5
+; VI-NEXT: s_branch .LBB47_4
; VI-NEXT: .LBB47_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB47_2
-; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -25809,7 +25033,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB47_5: ; %end
+; VI-NEXT: .LBB47_4: ; %end
; VI-NEXT: v_readlane_b32 s71, v32, 23
; VI-NEXT: v_readlane_b32 s70, v32, 22
; VI-NEXT: v_readlane_b32 s69, v32, 21
@@ -25912,9 +25136,9 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s55, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s56, s56, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s57, s57, s58
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
+; GFX9-NEXT: s_cbranch_execnz .LBB47_3
; GFX9-NEXT: .LBB47_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v21, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v21 op_sel_hi:[1,0]
@@ -25939,10 +25163,8 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v19, s55, v21 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v20, s56, v21 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v21, s57, v21 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB47_5
+; GFX9-NEXT: s_branch .LBB47_4
; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
-; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -25975,7 +25197,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB47_5: ; %end
+; GFX9-NEXT: .LBB47_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -26048,10 +25270,10 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s19, s57, s62
; GFX11-NEXT: s_pack_ll_b32_b16 s20, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s21, s46, s59
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-NEXT: .LBB47_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -26077,8 +25299,6 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v21, 0x200, s21 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -26698,7 +25918,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s5, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v0
-; SI-NEXT: s_cbranch_scc0 .LBB49_3
+; SI-NEXT: s_cbranch_scc0 .LBB49_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s94, s9, 16
; SI-NEXT: s_lshr_b32 s93, s11, 16
@@ -26722,7 +25942,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; SI-NEXT: s_lshr_b64 s[60:61], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[62:63], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[72:73], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB49_4
+; SI-NEXT: s_cbranch_execnz .LBB49_3
; SI-NEXT: .LBB49_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[20:21], s[8:9], 1.0
; SI-NEXT: v_add_f64 v[18:19], s[10:11], 1.0
@@ -26757,32 +25977,8 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v1
-; SI-NEXT: s_branch .LBB49_5
+; SI-NEXT: s_branch .LBB49_4
; SI-NEXT: .LBB49_3:
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: s_branch .LBB49_2
-; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -26827,7 +26023,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v24, s40
; SI-NEXT: v_mov_b32_e32 v23, s14
; SI-NEXT: v_mov_b32_e32 v22, s12
-; SI-NEXT: .LBB49_5: ; %end
+; SI-NEXT: .LBB49_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31
@@ -26909,7 +26105,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s5, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s4, v0
-; VI-NEXT: s_cbranch_scc0 .LBB49_3
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s9, 16
; VI-NEXT: s_lshr_b32 s57, s8, 16
@@ -26933,7 +26129,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; VI-NEXT: s_lshr_b32 s74, s18, 16
; VI-NEXT: s_lshr_b32 s56, s17, 16
; VI-NEXT: s_lshr_b32 s75, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB49_4
+; VI-NEXT: s_cbranch_execnz .LBB49_3
; VI-NEXT: .LBB49_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[20:21], s[8:9], 1.0
; VI-NEXT: v_add_f64 v[18:19], s[10:11], 1.0
@@ -26968,32 +26164,8 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; VI-NEXT: s_branch .LBB49_5
+; VI-NEXT: s_branch .LBB49_4
; VI-NEXT: .LBB49_3:
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB49_2
-; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -27038,7 +26210,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v28, s40
; VI-NEXT: v_mov_b32_e32 v26, s15
; VI-NEXT: v_mov_b32_e32 v24, s14
-; VI-NEXT: .LBB49_5: ; %end
+; VI-NEXT: .LBB49_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v49, 16, v49
; VI-NEXT: v_lshlrev_b32_e32 v39, 16, v39
; VI-NEXT: v_lshlrev_b32_e32 v37, 16, v37
@@ -27098,7 +26270,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s5, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s14, s9, 16
; GFX9-NEXT: s_lshr_b32 s57, s8, 16
@@ -27122,7 +26294,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX9-NEXT: s_lshr_b32 s74, s18, 16
; GFX9-NEXT: s_lshr_b32 s56, s17, 16
; GFX9-NEXT: s_lshr_b32 s75, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
+; GFX9-NEXT: s_cbranch_execnz .LBB49_3
; GFX9-NEXT: .LBB49_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[20:21], s[8:9], 1.0
; GFX9-NEXT: v_add_f64 v[18:19], s[10:11], 1.0
@@ -27157,32 +26329,8 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX9-NEXT: s_branch .LBB49_5
+; GFX9-NEXT: s_branch .LBB49_4
; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB49_2
-; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -27227,7 +26375,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v28, s40
; GFX9-NEXT: v_mov_b32_e32 v26, s15
; GFX9-NEXT: v_mov_b32_e32 v24, s14
-; GFX9-NEXT: .LBB49_5: ; %end
+; GFX9-NEXT: .LBB49_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -27284,7 +26432,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s8, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s5, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s4, 16
@@ -27309,7 +26457,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s62, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], s[4:5], 1.0
; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], s[6:7], 1.0
@@ -27344,32 +26492,8 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB49_5
+; GFX11-TRUE16-NEXT: s_branch .LBB49_4
; GFX11-TRUE16-NEXT: .LBB49_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-TRUE16-NEXT: s_branch .LBB49_2
-; GFX11-TRUE16-NEXT: .LBB49_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -27392,7 +26516,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s42 :: v_dual_mov_b32 v27, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s14 :: v_dual_mov_b32 v25, s10
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s12 :: v_dual_mov_b32 v23, s9
-; GFX11-TRUE16-NEXT: .LBB49_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB49_4: ; %end
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, v36 :: v_dual_mov_b32 v33, v33
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v26 :: v_dual_mov_b32 v49, v49
@@ -27441,7 +26565,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s8, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s5, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s4, 16
@@ -27466,7 +26590,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s62, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f64 v[17:18], s[4:5], 1.0
; GFX11-FAKE16-NEXT: v_add_f64 v[19:20], s[6:7], 1.0
@@ -27501,32 +26625,8 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v5
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v4
-; GFX11-FAKE16-NEXT: s_branch .LBB49_5
+; GFX11-FAKE16-NEXT: s_branch .LBB49_4
; GFX11-FAKE16-NEXT: .LBB49_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-FAKE16-NEXT: s_branch .LBB49_2
-; GFX11-FAKE16-NEXT: .LBB49_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v23, s18
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v9, s20
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s16 :: v_dual_mov_b32 v7, s22
@@ -27549,7 +26649,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s15 :: v_dual_mov_b32 v33, s13
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s12 :: v_dual_mov_b32 v29, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s10 :: v_dual_mov_b32 v27, s9
-; GFX11-FAKE16-NEXT: .LBB49_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB49_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
@@ -28620,7 +27720,7 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; SI-NEXT: v_writelane_b32 v22, s66, 18
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v22, s67, 19
-; SI-NEXT: s_cbranch_scc0 .LBB51_4
+; SI-NEXT: s_cbranch_scc0 .LBB51_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s35, 16
@@ -28848,9 +27948,6 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB51_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB51_2
;
; VI-LABEL: bitcast_v44i16_to_v11f64_scalar:
; VI: ; %bb.0:
@@ -28914,7 +28011,7 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s89, s90, 16
; VI-NEXT: v_readfirstlane_b32 s4, v8
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB51_4
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s71, 16
@@ -29146,9 +28243,6 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB51_2
;
; GFX9-LABEL: bitcast_v44i16_to_v11f64_scalar:
; GFX9: ; %bb.0:
@@ -29222,9 +28316,9 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s55, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s56, s56, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s57, s57, s58
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_4
+; GFX9-NEXT: s_cbranch_execnz .LBB51_3
; GFX9-NEXT: .LBB51_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -29248,10 +28342,8 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v19, s55, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v20, s56, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v21, s57, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB51_5
+; GFX9-NEXT: s_branch .LBB51_4
; GFX9-NEXT: .LBB51_3:
-; GFX9-NEXT: s_branch .LBB51_2
-; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -29284,7 +28376,7 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB51_5: ; %end
+; GFX9-NEXT: .LBB51_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -29357,10 +28449,10 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s19, s57, s62
; GFX11-NEXT: s_pack_ll_b32_b16 s20, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s21, s46, s59
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-NEXT: .LBB51_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -29386,8 +28478,6 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v21, s21, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
-; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -30007,7 +29097,7 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; SI-NEXT: v_readfirstlane_b32 s5, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v0
-; SI-NEXT: s_cbranch_scc0 .LBB53_3
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s94, s9, 16
; SI-NEXT: s_lshr_b32 s93, s11, 16
@@ -30031,7 +29121,7 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; SI-NEXT: s_lshr_b64 s[60:61], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[62:63], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[72:73], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB53_4
+; SI-NEXT: s_cbranch_execnz .LBB53_3
; SI-NEXT: .LBB53_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[20:21], s[8:9], 1.0
; SI-NEXT: v_add_f64 v[18:19], s[10:11], 1.0
@@ -30066,32 +29156,8 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v1
-; SI-NEXT: s_branch .LBB53_5
+; SI-NEXT: s_branch .LBB53_4
; SI-NEXT: .LBB53_3:
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr77
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr79
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: s_branch .LBB53_2
-; SI-NEXT: .LBB53_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -30136,7 +29202,7 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; SI-NEXT: v_mov_b32_e32 v24, s40
; SI-NEXT: v_mov_b32_e32 v23, s14
; SI-NEXT: v_mov_b32_e32 v22, s12
-; SI-NEXT: .LBB53_5: ; %end
+; SI-NEXT: .LBB53_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31
@@ -30218,7 +29284,7 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; VI-NEXT: v_readfirstlane_b32 s5, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s4, v0
-; VI-NEXT: s_cbranch_scc0 .LBB53_3
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s9, 16
; VI-NEXT: s_lshr_b32 s57, s8, 16
@@ -30242,7 +29308,7 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; VI-NEXT: s_lshr_b32 s74, s18, 16
; VI-NEXT: s_lshr_b32 s56, s17, 16
; VI-NEXT: s_lshr_b32 s75, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB53_4
+; VI-NEXT: s_cbranch_execnz .LBB53_3
; VI-NEXT: .LBB53_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[20:21], s[8:9], 1.0
; VI-NEXT: v_add_f64 v[18:19], s[10:11], 1.0
@@ -30277,32 +29343,8 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; VI-NEXT: s_branch .LBB53_5
+; VI-NEXT: s_branch .LBB53_4
; VI-NEXT: .LBB53_3:
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB53_2
-; VI-NEXT: .LBB53_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -30347,7 +29389,7 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; VI-NEXT: v_mov_b32_e32 v28, s40
; VI-NEXT: v_mov_b32_e32 v26, s15
; VI-NEXT: v_mov_b32_e32 v24, s14
-; VI-NEXT: .LBB53_5: ; %end
+; VI-NEXT: .LBB53_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v49, 16, v49
; VI-NEXT: v_lshlrev_b32_e32 v39, 16, v39
; VI-NEXT: v_lshlrev_b32_e32 v37, 16, v37
@@ -30407,7 +29449,7 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX9-NEXT: v_readfirstlane_b32 s5, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s14, s9, 16
; GFX9-NEXT: s_lshr_b32 s57, s8, 16
@@ -30431,7 +29473,7 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX9-NEXT: s_lshr_b32 s74, s18, 16
; GFX9-NEXT: s_lshr_b32 s56, s17, 16
; GFX9-NEXT: s_lshr_b32 s75, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB53_4
+; GFX9-NEXT: s_cbranch_execnz .LBB53_3
; GFX9-NEXT: .LBB53_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[20:21], s[8:9], 1.0
; GFX9-NEXT: v_add_f64 v[18:19], s[10:11], 1.0
@@ -30466,32 +29508,8 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX9-NEXT: s_branch .LBB53_5
+; GFX9-NEXT: s_branch .LBB53_4
; GFX9-NEXT: .LBB53_3:
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB53_2
-; GFX9-NEXT: .LBB53_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -30536,7 +29554,7 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v28, s40
; GFX9-NEXT: v_mov_b32_e32 v26, s15
; GFX9-NEXT: v_mov_b32_e32 v24, s14
-; GFX9-NEXT: .LBB53_5: ; %end
+; GFX9-NEXT: .LBB53_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -30593,7 +29611,7 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s8, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s5, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s4, 16
@@ -30618,7 +29636,7 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s62, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], s[4:5], 1.0
; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], s[6:7], 1.0
@@ -30653,32 +29671,8 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB53_5
+; GFX11-TRUE16-NEXT: s_branch .LBB53_4
; GFX11-TRUE16-NEXT: .LBB53_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-TRUE16-NEXT: s_branch .LBB53_2
-; GFX11-TRUE16-NEXT: .LBB53_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -30701,7 +29695,7 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s42 :: v_dual_mov_b32 v27, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s14 :: v_dual_mov_b32 v25, s10
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s12 :: v_dual_mov_b32 v23, s9
-; GFX11-TRUE16-NEXT: .LBB53_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB53_4: ; %end
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, v36 :: v_dual_mov_b32 v33, v33
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v26 :: v_dual_mov_b32 v49, v49
@@ -30750,7 +29744,7 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s8, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s5, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s4, 16
@@ -30775,7 +29769,7 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s62, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f64 v[17:18], s[4:5], 1.0
; GFX11-FAKE16-NEXT: v_add_f64 v[19:20], s[6:7], 1.0
@@ -30810,32 +29804,8 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v5
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v4
-; GFX11-FAKE16-NEXT: s_branch .LBB53_5
+; GFX11-FAKE16-NEXT: s_branch .LBB53_4
; GFX11-FAKE16-NEXT: .LBB53_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-FAKE16-NEXT: s_branch .LBB53_2
-; GFX11-FAKE16-NEXT: .LBB53_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v23, s18
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v9, s20
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s16 :: v_dual_mov_b32 v7, s22
@@ -30858,7 +29828,7 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s15 :: v_dual_mov_b32 v33, s13
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s12 :: v_dual_mov_b32 v29, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s10 :: v_dual_mov_b32 v27, s9
-; GFX11-FAKE16-NEXT: .LBB53_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB53_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
@@ -32028,7 +30998,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; SI-NEXT: v_writelane_b32 v32, s66, 18
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_writelane_b32 v32, s67, 19
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s35, 16
@@ -32096,7 +31066,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s57, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB55_4
+; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s35
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -32274,11 +31244,8 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; SI-NEXT: v_or_b32_e32 v20, v21, v20
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v22
; SI-NEXT: v_or_b32_e32 v21, v23, v21
-; SI-NEXT: s_branch .LBB55_5
+; SI-NEXT: s_branch .LBB55_4
; SI-NEXT: .LBB55_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB55_2
-; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -32311,7 +31278,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB55_5: ; %end
+; SI-NEXT: .LBB55_4: ; %end
; SI-NEXT: v_readlane_b32 s67, v32, 19
; SI-NEXT: v_readlane_b32 s66, v32, 18
; SI-NEXT: v_readlane_b32 s65, v32, 17
@@ -32400,7 +31367,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; VI-NEXT: s_lshr_b32 s91, s89, 16
; VI-NEXT: v_readfirstlane_b32 s4, v8
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s71, 16
@@ -32468,7 +31435,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s57, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB55_4
+; VI-NEXT: s_cbranch_execnz .LBB55_3
; VI-NEXT: .LBB55_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v21, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s71
@@ -32559,11 +31526,8 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; VI-NEXT: v_add_f16_sdwa v22, v22, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v21, s6, v21
; VI-NEXT: v_or_b32_e32 v21, v21, v22
-; VI-NEXT: s_branch .LBB55_5
+; VI-NEXT: s_branch .LBB55_4
; VI-NEXT: .LBB55_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB55_2
-; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -32596,7 +31560,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB55_5: ; %end
+; VI-NEXT: .LBB55_4: ; %end
; VI-NEXT: v_readlane_b32 s71, v32, 23
; VI-NEXT: v_readlane_b32 s70, v32, 22
; VI-NEXT: v_readlane_b32 s69, v32, 21
@@ -32699,9 +31663,9 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX9-NEXT: s_pack_ll_b32_b16 s55, s60, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s56, s56, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s57, s57, s58
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
+; GFX9-NEXT: s_cbranch_execnz .LBB55_3
; GFX9-NEXT: .LBB55_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v21, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v21 op_sel_hi:[1,0]
@@ -32726,10 +31690,8 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX9-NEXT: v_pk_add_f16 v19, s55, v21 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v20, s56, v21 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v21, s57, v21 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB55_5
+; GFX9-NEXT: s_branch .LBB55_4
; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
-; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -32762,7 +31724,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB55_5: ; %end
+; GFX9-NEXT: .LBB55_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -32835,10 +31797,10 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-NEXT: s_pack_ll_b32_b16 s19, s57, s62
; GFX11-NEXT: s_pack_ll_b32_b16 s20, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s21, s46, s59
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-NEXT: .LBB55_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -32864,8 +31826,6 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-NEXT: v_pk_add_f16 v21, 0x200, s21 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
-; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -33952,7 +32912,7 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; SI-NEXT: s_lshr_b32 s67, s68, 16
; SI-NEXT: v_readfirstlane_b32 s4, v8
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB57_4
+; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s5, s17, 0xffff
; SI-NEXT: s_lshl_b32 s7, s30, 16
@@ -34300,30 +33260,6 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB57_4:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: s_branch .LBB57_2
;
; VI-LABEL: bitcast_v44i16_to_v44f16_scalar:
; VI: ; %bb.0:
@@ -34360,7 +33296,7 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; VI-NEXT: s_lshr_b32 s60, s59, 16
; VI-NEXT: v_readfirstlane_b32 s4, v8
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB57_3
; VI-NEXT: .LBB57_2: ; %cmp.true
@@ -34498,8 +33434,6 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v20, s8
; VI-NEXT: v_mov_b32_e32 v21, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v44i16_to_v44f16_scalar:
; GFX9: ; %bb.0:
@@ -34536,9 +33470,9 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s44, s60, 16
; GFX9-NEXT: v_readfirstlane_b32 s4, v8
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
+; GFX9-NEXT: s_cbranch_execnz .LBB57_3
; GFX9-NEXT: .LBB57_2: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s75, s59
; GFX9-NEXT: v_pk_add_u16 v21, s4, 3 op_sel_hi:[1,0]
@@ -34606,10 +33540,8 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v19
; GFX9-NEXT: v_lshrrev_b32_e32 v23, 16, v20
; GFX9-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX9-NEXT: s_branch .LBB57_5
+; GFX9-NEXT: s_branch .LBB57_4
; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
-; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v21, s75
; GFX9-NEXT: v_mov_b32_e32 v20, s74
; GFX9-NEXT: v_mov_b32_e32 v19, s73
@@ -34654,7 +33586,7 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v49, s8
; GFX9-NEXT: v_mov_b32_e32 v50, s7
; GFX9-NEXT: v_mov_b32_e32 v51, s6
-; GFX9-NEXT: .LBB57_5: ; %end
+; GFX9-NEXT: .LBB57_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -34733,10 +33665,10 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s58, 16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s62, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s62, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s62
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s57, s61, s57
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s56, s60, s56
@@ -34804,10 +33736,8 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX11-TRUE16-NEXT: s_branch .LBB57_5
+; GFX11-TRUE16-NEXT: s_branch .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB57_2
-; GFX11-TRUE16-NEXT: .LBB57_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, s61 :: v_dual_mov_b32 v20, s60
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, s59 :: v_dual_mov_b32 v18, s58
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
@@ -34830,7 +33760,7 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s9 :: v_dual_mov_b32 v39, s8
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, s7 :: v_dual_mov_b32 v49, s6
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s4 :: v_dual_mov_b32 v51, s5
-; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB57_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
@@ -34899,10 +33829,10 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s58, 16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s62, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s62, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s62
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-FAKE16-NEXT: .LBB57_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s57, s61, s57
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s56, s60, s56
@@ -34970,10 +33900,8 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v18
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-FAKE16-NEXT: s_branch .LBB57_5
+; GFX11-FAKE16-NEXT: s_branch .LBB57_4
; GFX11-FAKE16-NEXT: .LBB57_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB57_2
-; GFX11-FAKE16-NEXT: .LBB57_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s61 :: v_dual_mov_b32 v18, s60
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, s59 :: v_dual_mov_b32 v20, s58
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s29 :: v_dual_mov_b32 v12, s28
@@ -34996,7 +33924,7 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v38, s9 :: v_dual_mov_b32 v39, s8
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v48, s7 :: v_dual_mov_b32 v49, s6
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v50, s4 :: v_dual_mov_b32 v51, s5
-; GFX11-FAKE16-NEXT: .LBB57_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB57_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX11-FAKE16-NEXT: v_and_b32_e32 v52, 0xffff, v1
@@ -35825,9 +34753,9 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
-; SI-NEXT: s_cbranch_scc0 .LBB59_3
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_4
+; SI-NEXT: s_cbranch_execnz .LBB59_3
; SI-NEXT: .LBB59_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s75
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -36023,10 +34951,8 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; SI-NEXT: v_lshr_b64 v[26:27], v[16:17], 16
; SI-NEXT: v_lshr_b64 v[24:25], v[18:19], 16
; SI-NEXT: v_lshr_b64 v[22:23], v[20:21], 16
-; SI-NEXT: s_branch .LBB59_5
+; SI-NEXT: s_branch .LBB59_4
; SI-NEXT: .LBB59_3:
-; SI-NEXT: s_branch .LBB59_2
-; SI-NEXT: .LBB59_4:
; SI-NEXT: v_mov_b32_e32 v42, s58
; SI-NEXT: v_mov_b32_e32 v44, s56
; SI-NEXT: v_mov_b32_e32 v45, s46
@@ -36077,7 +35003,7 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v26, s47
; SI-NEXT: v_mov_b32_e32 v24, s45
; SI-NEXT: v_mov_b32_e32 v22, s43
-; SI-NEXT: .LBB59_5: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v50
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; SI-NEXT: v_or_b32_e32 v0, v2, v0
@@ -36198,9 +35124,9 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s75, s74, 16
; VI-NEXT: v_readfirstlane_b32 s4, v8
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
+; VI-NEXT: s_cbranch_execnz .LBB59_3
; VI-NEXT: .LBB59_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v22, 0x200
; VI-NEXT: v_add_f16_e32 v0, s16, v22
@@ -36247,10 +35173,8 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v23, s47, v22
; VI-NEXT: v_add_f16_e32 v21, s44, v22
; VI-NEXT: v_add_f16_e32 v22, s45, v22
-; VI-NEXT: s_branch .LBB59_5
+; VI-NEXT: s_branch .LBB59_4
; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
-; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v22, s45
; VI-NEXT: v_mov_b32_e32 v21, s44
; VI-NEXT: v_mov_b32_e32 v23, s47
@@ -36295,7 +35219,7 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v51, s43
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: .LBB59_5: ; %end
+; VI-NEXT: .LBB59_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v51, 16, v51
; VI-NEXT: v_lshlrev_b32_e32 v50, 16, v50
; VI-NEXT: v_lshlrev_b32_e32 v49, 16, v49
@@ -36377,9 +35301,9 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s44, s60, 16
; GFX9-NEXT: v_readfirstlane_b32 s4, v8
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
+; GFX9-NEXT: s_cbranch_execnz .LBB59_3
; GFX9-NEXT: .LBB59_2: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s75, s59
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
@@ -36448,10 +35372,8 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v19
; GFX9-NEXT: v_lshrrev_b32_e32 v23, 16, v20
; GFX9-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX9-NEXT: s_branch .LBB59_5
+; GFX9-NEXT: s_branch .LBB59_4
; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
-; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v21, s75
; GFX9-NEXT: v_mov_b32_e32 v20, s74
; GFX9-NEXT: v_mov_b32_e32 v19, s73
@@ -36496,7 +35418,7 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v49, s8
; GFX9-NEXT: v_mov_b32_e32 v50, s7
; GFX9-NEXT: v_mov_b32_e32 v51, s6
-; GFX9-NEXT: .LBB59_5: ; %end
+; GFX9-NEXT: .LBB59_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -36575,10 +35497,10 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s58, 16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s62, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s62, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s62
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s57, s61, s57
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s56, s60, s56
@@ -36646,10 +35568,8 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX11-TRUE16-NEXT: s_branch .LBB59_5
+; GFX11-TRUE16-NEXT: s_branch .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB59_2
-; GFX11-TRUE16-NEXT: .LBB59_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, s61 :: v_dual_mov_b32 v20, s60
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, s59 :: v_dual_mov_b32 v18, s58
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
@@ -36672,7 +35592,7 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s9 :: v_dual_mov_b32 v39, s8
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, s7 :: v_dual_mov_b32 v49, s6
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s4 :: v_dual_mov_b32 v51, s5
-; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB59_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
@@ -36741,10 +35661,10 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s58, 16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s62, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s62, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s62
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-FAKE16-NEXT: .LBB59_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s57, s61, s57
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s56, s60, s56
@@ -36812,10 +35732,8 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v18
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-FAKE16-NEXT: s_branch .LBB59_5
+; GFX11-FAKE16-NEXT: s_branch .LBB59_4
; GFX11-FAKE16-NEXT: .LBB59_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB59_2
-; GFX11-FAKE16-NEXT: .LBB59_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s61 :: v_dual_mov_b32 v18, s60
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, s59 :: v_dual_mov_b32 v20, s58
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s29 :: v_dual_mov_b32 v12, s28
@@ -36838,7 +35756,7 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v38, s9 :: v_dual_mov_b32 v39, s8
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v48, s7 :: v_dual_mov_b32 v49, s6
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v50, s4 :: v_dual_mov_b32 v51, s5
-; GFX11-FAKE16-NEXT: .LBB59_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB59_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX11-FAKE16-NEXT: v_and_b32_e32 v52, 0xffff, v1
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
index fe9d3ec2a0182..b14fb3f0b2983 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
@@ -188,7 +188,7 @@ define inreg <24 x float> @bitcast_v24i32_to_v24f32_scalar(<24 x i32> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s14, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s15, v0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -242,8 +242,6 @@ define inreg <24 x float> @bitcast_v24i32_to_v24f32_scalar(<24 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v22, s7
; SI-NEXT: v_mov_b32_e32 v23, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v24i32_to_v24f32_scalar:
; VI: ; %bb.0:
@@ -260,7 +258,7 @@ define inreg <24 x float> @bitcast_v24i32_to_v24f32_scalar(<24 x i32> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s14, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s15, v0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -314,8 +312,6 @@ define inreg <24 x float> @bitcast_v24i32_to_v24f32_scalar(<24 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v22, s7
; VI-NEXT: v_mov_b32_e32 v23, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v24i32_to_v24f32_scalar:
; GFX9: ; %bb.0:
@@ -332,7 +328,7 @@ define inreg <24 x float> @bitcast_v24i32_to_v24f32_scalar(<24 x i32> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s14, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s15, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -386,8 +382,6 @@ define inreg <24 x float> @bitcast_v24i32_to_v24f32_scalar(<24 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v22, s7
; GFX9-NEXT: v_mov_b32_e32 v23, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v24i32_to_v24f32_scalar:
; GFX11: ; %bb.0:
@@ -401,7 +395,7 @@ define inreg <24 x float> @bitcast_v24i32_to_v24f32_scalar(<24 x i32> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s9, v0
; GFX11-NEXT: s_cmp_lg_u32 s10, 0
; GFX11-NEXT: s_mov_b32 s10, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s10
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -445,8 +439,6 @@ define inreg <24 x float> @bitcast_v24i32_to_v24f32_scalar(<24 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v20, s7 :: v_dual_mov_b32 v21, s6
; GFX11-NEXT: v_dual_mov_b32 v22, s5 :: v_dual_mov_b32 v23, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -664,9 +656,9 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v23, s59, 1.0
; SI-NEXT: v_add_f32_e64 v22, s58, 1.0
@@ -692,10 +684,8 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB3_5
+; SI-NEXT: s_branch .LBB3_4
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -728,7 +718,7 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB3_5: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -791,9 +781,9 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v23, s59, 1.0
; VI-NEXT: v_add_f32_e64 v22, s58, 1.0
@@ -819,10 +809,8 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB3_5
+; VI-NEXT: s_branch .LBB3_4
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -855,7 +843,7 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB3_5: ; %end
+; VI-NEXT: .LBB3_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -918,9 +906,9 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v23, s59, 1.0
; GFX9-NEXT: v_add_f32_e64 v22, s58, 1.0
@@ -946,10 +934,8 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB3_5
+; GFX9-NEXT: s_branch .LBB3_4
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -982,7 +968,7 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB3_5: ; %end
+; GFX9-NEXT: .LBB3_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -1046,10 +1032,10 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v23, s59, 1.0
; GFX11-NEXT: v_add_f32_e64 v22, s58, 1.0
@@ -1075,10 +1061,8 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB3_5
+; GFX11-NEXT: s_branch .LBB3_4
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -1095,7 +1079,7 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB3_5: ; %end
+; GFX11-NEXT: .LBB3_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -1312,7 +1296,7 @@ define inreg <12 x i64> @bitcast_v24i32_to_v12i64_scalar(<24 x i32> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s14, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s15, v0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB5_3
; SI-NEXT: .LBB5_2: ; %cmp.true
@@ -1366,8 +1350,6 @@ define inreg <12 x i64> @bitcast_v24i32_to_v12i64_scalar(<24 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v22, s7
; SI-NEXT: v_mov_b32_e32 v23, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v24i32_to_v12i64_scalar:
; VI: ; %bb.0:
@@ -1384,7 +1366,7 @@ define inreg <12 x i64> @bitcast_v24i32_to_v12i64_scalar(<24 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s14, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s15, v0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -1438,8 +1420,6 @@ define inreg <12 x i64> @bitcast_v24i32_to_v12i64_scalar(<24 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v22, s7
; VI-NEXT: v_mov_b32_e32 v23, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v24i32_to_v12i64_scalar:
; GFX9: ; %bb.0:
@@ -1456,7 +1436,7 @@ define inreg <12 x i64> @bitcast_v24i32_to_v12i64_scalar(<24 x i32> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s14, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s15, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -1510,8 +1490,6 @@ define inreg <12 x i64> @bitcast_v24i32_to_v12i64_scalar(<24 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v22, s7
; GFX9-NEXT: v_mov_b32_e32 v23, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v24i32_to_v12i64_scalar:
; GFX11: ; %bb.0:
@@ -1525,7 +1503,7 @@ define inreg <12 x i64> @bitcast_v24i32_to_v12i64_scalar(<24 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s9, v0
; GFX11-NEXT: s_cmp_lg_u32 s10, 0
; GFX11-NEXT: s_mov_b32 s10, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s10
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -1569,8 +1547,6 @@ define inreg <12 x i64> @bitcast_v24i32_to_v12i64_scalar(<24 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v20, s7 :: v_dual_mov_b32 v21, s6
; GFX11-NEXT: v_dual_mov_b32 v22, s5 :: v_dual_mov_b32 v23, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1776,7 +1752,7 @@ define inreg <24 x i32> @bitcast_v12i64_to_v24i32_scalar(<12 x i64> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s14, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s15, v0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB7_3
; SI-NEXT: .LBB7_2: ; %cmp.true
@@ -1830,8 +1806,6 @@ define inreg <24 x i32> @bitcast_v12i64_to_v24i32_scalar(<12 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v22, s7
; SI-NEXT: v_mov_b32_e32 v23, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v12i64_to_v24i32_scalar:
; VI: ; %bb.0:
@@ -1848,7 +1822,7 @@ define inreg <24 x i32> @bitcast_v12i64_to_v24i32_scalar(<12 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s14, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s15, v0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -1902,8 +1876,6 @@ define inreg <24 x i32> @bitcast_v12i64_to_v24i32_scalar(<12 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v22, s7
; VI-NEXT: v_mov_b32_e32 v23, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v12i64_to_v24i32_scalar:
; GFX9: ; %bb.0:
@@ -1920,7 +1892,7 @@ define inreg <24 x i32> @bitcast_v12i64_to_v24i32_scalar(<12 x i64> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s14, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s15, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
@@ -1974,8 +1946,6 @@ define inreg <24 x i32> @bitcast_v12i64_to_v24i32_scalar(<12 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v22, s7
; GFX9-NEXT: v_mov_b32_e32 v23, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v12i64_to_v24i32_scalar:
; GFX11: ; %bb.0:
@@ -1989,7 +1959,7 @@ define inreg <24 x i32> @bitcast_v12i64_to_v24i32_scalar(<12 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s9, v0
; GFX11-NEXT: s_cmp_lg_u32 s10, 0
; GFX11-NEXT: s_mov_b32 s10, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s10
; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
@@ -2033,8 +2003,6 @@ define inreg <24 x i32> @bitcast_v12i64_to_v24i32_scalar(<12 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v20, s7 :: v_dual_mov_b32 v21, s6
; GFX11-NEXT: v_dual_mov_b32 v22, s5 :: v_dual_mov_b32 v23, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2234,7 +2202,7 @@ define inreg <12 x double> @bitcast_v24i32_to_v12f64_scalar(<24 x i32> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s14, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s15, v0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB9_3
; SI-NEXT: .LBB9_2: ; %cmp.true
@@ -2288,8 +2256,6 @@ define inreg <12 x double> @bitcast_v24i32_to_v12f64_scalar(<24 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v22, s7
; SI-NEXT: v_mov_b32_e32 v23, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v24i32_to_v12f64_scalar:
; VI: ; %bb.0:
@@ -2306,7 +2272,7 @@ define inreg <12 x double> @bitcast_v24i32_to_v12f64_scalar(<24 x i32> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s14, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s15, v0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -2360,8 +2326,6 @@ define inreg <12 x double> @bitcast_v24i32_to_v12f64_scalar(<24 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v22, s7
; VI-NEXT: v_mov_b32_e32 v23, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v24i32_to_v12f64_scalar:
; GFX9: ; %bb.0:
@@ -2378,7 +2342,7 @@ define inreg <12 x double> @bitcast_v24i32_to_v12f64_scalar(<24 x i32> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s14, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s15, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -2432,8 +2396,6 @@ define inreg <12 x double> @bitcast_v24i32_to_v12f64_scalar(<24 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v22, s7
; GFX9-NEXT: v_mov_b32_e32 v23, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v24i32_to_v12f64_scalar:
; GFX11: ; %bb.0:
@@ -2447,7 +2409,7 @@ define inreg <12 x double> @bitcast_v24i32_to_v12f64_scalar(<24 x i32> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s9, v0
; GFX11-NEXT: s_cmp_lg_u32 s10, 0
; GFX11-NEXT: s_mov_b32 s10, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s10
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -2491,8 +2453,6 @@ define inreg <12 x double> @bitcast_v24i32_to_v12f64_scalar(<24 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v20, s7 :: v_dual_mov_b32 v21, s6
; GFX11-NEXT: v_dual_mov_b32 v22, s5 :: v_dual_mov_b32 v23, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2674,9 +2634,9 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
; SI-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
@@ -2690,10 +2650,8 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; SI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; SI-NEXT: s_branch .LBB11_5
+; SI-NEXT: s_branch .LBB11_4
; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -2726,7 +2684,7 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB11_5: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -2789,9 +2747,9 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
; VI-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
@@ -2805,10 +2763,8 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB11_5
+; VI-NEXT: s_branch .LBB11_4
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -2841,7 +2797,7 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB11_5: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -2904,9 +2860,9 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
; GFX9-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
@@ -2920,10 +2876,8 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB11_5
+; GFX9-NEXT: s_branch .LBB11_4
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -2956,7 +2910,7 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB11_5: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -3020,10 +2974,10 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
; GFX11-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
@@ -3037,10 +2991,8 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB11_5
+; GFX11-NEXT: s_branch .LBB11_4
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -3057,7 +3009,7 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB11_5: ; %end
+; GFX11-NEXT: .LBB11_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -3790,7 +3742,7 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; SI-NEXT: s_cmp_lg_u32 s12, 0
; SI-NEXT: v_readfirstlane_b32 s12, v0
; SI-NEXT: v_writelane_b32 v24, s35, 3
-; SI-NEXT: s_cbranch_scc0 .LBB13_4
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s88, s5, 16
; SI-NEXT: s_lshr_b32 s89, s7, 16
@@ -3972,32 +3924,6 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB13_4:
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: s_branch .LBB13_2
;
; VI-LABEL: bitcast_v24i32_to_v48i16_scalar:
; VI: ; %bb.0:
@@ -4014,7 +3940,7 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s14, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s15, v0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s40, s6, 16
; VI-NEXT: s_lshr_b32 s41, s7, 16
@@ -4188,32 +4114,6 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v22, s7
; VI-NEXT: v_mov_b32_e32 v23, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v24i32_to_v48i16_scalar:
; GFX9: ; %bb.0:
@@ -4230,7 +4130,7 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s14, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s15, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s40, s6, 16
; GFX9-NEXT: s_lshr_b32 s41, s7, 16
@@ -4356,32 +4256,6 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v22, s7
; GFX9-NEXT: v_mov_b32_e32 v23, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v24i32_to_v48i16_scalar:
; GFX11: ; %bb.0:
@@ -4395,7 +4269,7 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s9, v0
; GFX11-NEXT: s_cmp_lg_u32 s10, 0
; GFX11-NEXT: s_mov_b32 s74, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s10, s4, 16
; GFX11-NEXT: s_lshr_b32 s11, s5, 16
@@ -4511,32 +4385,6 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v20, s7 :: v_dual_mov_b32 v21, s6
; GFX11-NEXT: v_dual_mov_b32 v22, s5 :: v_dual_mov_b32 v23, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5669,7 +5517,7 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s93, s94, 16
; SI-NEXT: v_readfirstlane_b32 s4, v10
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s71, 16
@@ -5919,9 +5767,6 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v48i16_to_v24i32_scalar:
; VI: ; %bb.0:
@@ -5993,7 +5838,7 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s31, s34, 16
; VI-NEXT: v_readfirstlane_b32 s4, v10
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s83, 16
@@ -6247,9 +6092,6 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v48i16_to_v24i32_scalar:
; GFX9: ; %bb.0:
@@ -6329,9 +6171,9 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s57, s57, s62
; GFX9-NEXT: s_pack_ll_b32_b16 s58, s58, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s59, s59, s60
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -6357,10 +6199,8 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v21, s57, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v22, s58, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v23, s59, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB15_5
+; GFX9-NEXT: s_branch .LBB15_4
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -6393,7 +6233,7 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB15_5: ; %end
+; GFX9-NEXT: .LBB15_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -6472,10 +6312,10 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s21, s58, s63
; GFX11-NEXT: s_pack_ll_b32_b16 s22, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s23, s44, s57
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -6503,8 +6343,6 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v23, s23, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -7237,7 +7075,7 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; SI-NEXT: s_cmp_lg_u32 s12, 0
; SI-NEXT: v_readfirstlane_b32 s12, v0
; SI-NEXT: v_writelane_b32 v24, s35, 3
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s88, s5, 16
; SI-NEXT: s_lshr_b32 s89, s7, 16
@@ -7419,32 +7257,6 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_v24i32_to_v48f16_scalar:
; VI: ; %bb.0:
@@ -7461,7 +7273,7 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s14, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s15, v0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s40, s6, 16
; VI-NEXT: s_lshr_b32 s41, s7, 16
@@ -7635,32 +7447,6 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v22, s7
; VI-NEXT: v_mov_b32_e32 v23, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v24i32_to_v48f16_scalar:
; GFX9: ; %bb.0:
@@ -7677,7 +7463,7 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s14, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s15, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s40, s6, 16
; GFX9-NEXT: s_lshr_b32 s41, s7, 16
@@ -7803,32 +7589,6 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v22, s7
; GFX9-NEXT: v_mov_b32_e32 v23, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v24i32_to_v48f16_scalar:
; GFX11: ; %bb.0:
@@ -7842,7 +7602,7 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s9, v0
; GFX11-NEXT: s_cmp_lg_u32 s10, 0
; GFX11-NEXT: s_mov_b32 s74, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s10, s4, 16
; GFX11-NEXT: s_lshr_b32 s11, s5, 16
@@ -7958,32 +7718,6 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v20, s7 :: v_dual_mov_b32 v21, s6
; GFX11-NEXT: v_dual_mov_b32 v22, s5 :: v_dual_mov_b32 v23, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9228,7 +8962,7 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s91, s89, 16
; SI-NEXT: v_readfirstlane_b32 s4, v10
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s71, 16
@@ -9302,7 +9036,7 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s59, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s71
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -9496,11 +9230,8 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v22, v23, v22
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v24
; SI-NEXT: v_or_b32_e32 v23, v25, v23
-; SI-NEXT: s_branch .LBB19_5
+; SI-NEXT: s_branch .LBB19_4
; SI-NEXT: .LBB19_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -9533,7 +9264,7 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB19_5: ; %end
+; SI-NEXT: .LBB19_4: ; %end
; SI-NEXT: v_readlane_b32 s71, v32, 23
; SI-NEXT: v_readlane_b32 s70, v32, 22
; SI-NEXT: v_readlane_b32 s69, v32, 21
@@ -9634,7 +9365,7 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s35, s34, 16
; VI-NEXT: v_readfirstlane_b32 s4, v10
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s83, 16
@@ -9708,7 +9439,7 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s59, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB19_4
+; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v23, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s83
@@ -9807,11 +9538,8 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; VI-NEXT: v_add_f16_sdwa v24, v24, v23 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v23, s6, v23
; VI-NEXT: v_or_b32_e32 v23, v23, v24
-; VI-NEXT: s_branch .LBB19_5
+; VI-NEXT: s_branch .LBB19_4
; VI-NEXT: .LBB19_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB19_2
-; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -9844,7 +9572,7 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB19_5: ; %end
+; VI-NEXT: .LBB19_4: ; %end
; VI-NEXT: v_readlane_b32 s83, v32, 27
; VI-NEXT: v_readlane_b32 s82, v32, 26
; VI-NEXT: v_readlane_b32 s81, v32, 25
@@ -9957,9 +9685,9 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s57, s57, s62
; GFX9-NEXT: s_pack_ll_b32_b16 s58, s58, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s59, s59, s60
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v23, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v23 op_sel_hi:[1,0]
@@ -9986,10 +9714,8 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v21, s57, v23 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v22, s58, v23 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v23, s59, v23 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB19_5
+; GFX9-NEXT: s_branch .LBB19_4
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -10022,7 +9748,7 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB19_5: ; %end
+; GFX9-NEXT: .LBB19_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -10101,10 +9827,10 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s21, s58, s63
; GFX11-NEXT: s_pack_ll_b32_b16 s22, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s23, s44, s57
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -10132,8 +9858,6 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v23, 0x200, s23 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -10368,9 +10092,9 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB21_3
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_4
+; SI-NEXT: s_cbranch_execnz .LBB21_3
; SI-NEXT: .LBB21_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v23, s59, 1.0
; SI-NEXT: v_add_f32_e64 v22, s58, 1.0
@@ -10396,10 +10120,8 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB21_5
+; SI-NEXT: s_branch .LBB21_4
; SI-NEXT: .LBB21_3:
-; SI-NEXT: s_branch .LBB21_2
-; SI-NEXT: .LBB21_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -10432,7 +10154,7 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB21_5: ; %end
+; SI-NEXT: .LBB21_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -10495,9 +10217,9 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB21_3
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_4
+; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v23, s59, 1.0
; VI-NEXT: v_add_f32_e64 v22, s58, 1.0
@@ -10523,10 +10245,8 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB21_5
+; VI-NEXT: s_branch .LBB21_4
; VI-NEXT: .LBB21_3:
-; VI-NEXT: s_branch .LBB21_2
-; VI-NEXT: .LBB21_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -10559,7 +10279,7 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB21_5: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -10622,9 +10342,9 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
+; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v23, s59, 1.0
; GFX9-NEXT: v_add_f32_e64 v22, s58, 1.0
@@ -10650,10 +10370,8 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB21_5
+; GFX9-NEXT: s_branch .LBB21_4
; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
-; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -10686,7 +10404,7 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB21_5: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -10750,10 +10468,10 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
; GFX11-NEXT: .LBB21_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v23, s59, 1.0
; GFX11-NEXT: v_add_f32_e64 v22, s58, 1.0
@@ -10779,10 +10497,8 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB21_5
+; GFX11-NEXT: s_branch .LBB21_4
; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
-; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -10799,7 +10515,7 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB21_5: ; %end
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -11022,7 +10738,7 @@ define inreg <24 x float> @bitcast_v12i64_to_v24f32_scalar(<12 x i64> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s14, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s15, v0
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB23_3
; SI-NEXT: .LBB23_2: ; %cmp.true
@@ -11076,8 +10792,6 @@ define inreg <24 x float> @bitcast_v12i64_to_v24f32_scalar(<12 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v22, s7
; SI-NEXT: v_mov_b32_e32 v23, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v12i64_to_v24f32_scalar:
; VI: ; %bb.0:
@@ -11094,7 +10808,7 @@ define inreg <24 x float> @bitcast_v12i64_to_v24f32_scalar(<12 x i64> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s14, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s15, v0
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
@@ -11148,8 +10862,6 @@ define inreg <24 x float> @bitcast_v12i64_to_v24f32_scalar(<12 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v22, s7
; VI-NEXT: v_mov_b32_e32 v23, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v12i64_to_v24f32_scalar:
; GFX9: ; %bb.0:
@@ -11166,7 +10878,7 @@ define inreg <24 x float> @bitcast_v12i64_to_v24f32_scalar(<12 x i64> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s14, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s15, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
@@ -11220,8 +10932,6 @@ define inreg <24 x float> @bitcast_v12i64_to_v24f32_scalar(<12 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v22, s7
; GFX9-NEXT: v_mov_b32_e32 v23, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v12i64_to_v24f32_scalar:
; GFX11: ; %bb.0:
@@ -11235,7 +10945,7 @@ define inreg <24 x float> @bitcast_v12i64_to_v24f32_scalar(<12 x i64> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s9, v0
; GFX11-NEXT: s_cmp_lg_u32 s10, 0
; GFX11-NEXT: s_mov_b32 s10, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s10
; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
@@ -11279,8 +10989,6 @@ define inreg <24 x float> @bitcast_v12i64_to_v24f32_scalar(<12 x i64> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v20, s7 :: v_dual_mov_b32 v21, s6
; GFX11-NEXT: v_dual_mov_b32 v22, s5 :: v_dual_mov_b32 v23, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: s_branch .LBB23_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11498,9 +11206,9 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB25_3
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_4
+; SI-NEXT: s_cbranch_execnz .LBB25_3
; SI-NEXT: .LBB25_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v23, s59, 1.0
; SI-NEXT: v_add_f32_e64 v22, s58, 1.0
@@ -11526,10 +11234,8 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB25_5
+; SI-NEXT: s_branch .LBB25_4
; SI-NEXT: .LBB25_3:
-; SI-NEXT: s_branch .LBB25_2
-; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -11562,7 +11268,7 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB25_5: ; %end
+; SI-NEXT: .LBB25_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -11625,9 +11331,9 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
+; VI-NEXT: s_cbranch_execnz .LBB25_3
; VI-NEXT: .LBB25_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v23, s59, 1.0
; VI-NEXT: v_add_f32_e64 v22, s58, 1.0
@@ -11653,10 +11359,8 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB25_5
+; VI-NEXT: s_branch .LBB25_4
; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
-; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -11689,7 +11393,7 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB25_5: ; %end
+; VI-NEXT: .LBB25_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -11752,9 +11456,9 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
+; GFX9-NEXT: s_cbranch_execnz .LBB25_3
; GFX9-NEXT: .LBB25_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v23, s59, 1.0
; GFX9-NEXT: v_add_f32_e64 v22, s58, 1.0
@@ -11780,10 +11484,8 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB25_5
+; GFX9-NEXT: s_branch .LBB25_4
; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
-; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -11816,7 +11518,7 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB25_5: ; %end
+; GFX9-NEXT: .LBB25_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -11880,10 +11582,10 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_3
; GFX11-NEXT: .LBB25_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v23, s59, 1.0
; GFX11-NEXT: v_add_f32_e64 v22, s58, 1.0
@@ -11909,10 +11611,8 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB25_5
+; GFX11-NEXT: s_branch .LBB25_4
; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
-; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -11929,7 +11629,7 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB25_5: ; %end
+; GFX11-NEXT: .LBB25_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -12128,9 +11828,9 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB27_3
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_4
+; SI-NEXT: s_cbranch_execnz .LBB27_3
; SI-NEXT: .LBB27_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
; SI-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
@@ -12144,10 +11844,8 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; SI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; SI-NEXT: s_branch .LBB27_5
+; SI-NEXT: s_branch .LBB27_4
; SI-NEXT: .LBB27_3:
-; SI-NEXT: s_branch .LBB27_2
-; SI-NEXT: .LBB27_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -12180,7 +11878,7 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB27_5: ; %end
+; SI-NEXT: .LBB27_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -12243,9 +11941,9 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB27_3
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_4
+; VI-NEXT: s_cbranch_execnz .LBB27_3
; VI-NEXT: .LBB27_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
; VI-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
@@ -12259,10 +11957,8 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; VI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB27_5
+; VI-NEXT: s_branch .LBB27_4
; VI-NEXT: .LBB27_3:
-; VI-NEXT: s_branch .LBB27_2
-; VI-NEXT: .LBB27_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -12295,7 +11991,7 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB27_5: ; %end
+; VI-NEXT: .LBB27_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -12358,9 +12054,9 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_4
+; GFX9-NEXT: s_cbranch_execnz .LBB27_3
; GFX9-NEXT: .LBB27_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
; GFX9-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
@@ -12374,10 +12070,8 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; GFX9-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB27_5
+; GFX9-NEXT: s_branch .LBB27_4
; GFX9-NEXT: .LBB27_3:
-; GFX9-NEXT: s_branch .LBB27_2
-; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -12410,7 +12104,7 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB27_5: ; %end
+; GFX9-NEXT: .LBB27_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -12474,10 +12168,10 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_3
; GFX11-NEXT: .LBB27_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
; GFX11-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
@@ -12491,10 +12185,8 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; GFX11-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB27_5
+; GFX11-NEXT: s_branch .LBB27_4
; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
-; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -12511,7 +12203,7 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB27_5: ; %end
+; GFX11-NEXT: .LBB27_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -13220,7 +12912,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; SI-NEXT: s_cmp_lg_u32 s12, 0
; SI-NEXT: v_readfirstlane_b32 s12, v0
; SI-NEXT: v_writelane_b32 v40, s35, 3
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s35, s5, 16
; SI-NEXT: s_lshr_b32 s34, s7, 16
@@ -13246,7 +12938,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[72:73], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[74:75], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[76:77], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB29_4
+; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v23, s5, 1.0
; SI-NEXT: v_add_f32_e64 v22, s4, 1.0
@@ -13296,34 +12988,8 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v1
-; SI-NEXT: s_branch .LBB29_5
+; SI-NEXT: s_branch .LBB29_4
; SI-NEXT: .LBB29_3:
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: s_branch .LBB29_2
-; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -13372,7 +13038,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v33, s72
; SI-NEXT: v_mov_b32_e32 v34, s74
; SI-NEXT: v_mov_b32_e32 v35, s76
-; SI-NEXT: .LBB29_5: ; %end
+; SI-NEXT: .LBB29_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v35
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v34
@@ -13470,7 +13136,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s14, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s15, v0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s40, s6, 16
; VI-NEXT: s_lshr_b32 s41, s7, 16
@@ -13496,7 +13162,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; VI-NEXT: s_lshr_b32 s77, s18, 16
; VI-NEXT: s_lshr_b32 s78, s17, 16
; VI-NEXT: s_lshr_b32 s79, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB29_4
+; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v23, s6, 1.0
; VI-NEXT: v_add_f32_e64 v22, s7, 1.0
@@ -13546,34 +13212,8 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v53, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v54, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; VI-NEXT: s_branch .LBB29_5
+; VI-NEXT: s_branch .LBB29_4
; VI-NEXT: .LBB29_3:
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: s_branch .LBB29_2
-; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -13622,7 +13262,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v26, s42
; VI-NEXT: v_mov_b32_e32 v25, s41
; VI-NEXT: v_mov_b32_e32 v24, s40
-; VI-NEXT: .LBB29_5: ; %end
+; VI-NEXT: .LBB29_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v55, 16, v55
; VI-NEXT: v_lshlrev_b32_e32 v54, 16, v54
; VI-NEXT: v_lshlrev_b32_e32 v53, 16, v53
@@ -13688,7 +13328,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s14, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s15, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s40, s6, 16
; GFX9-NEXT: s_lshr_b32 s41, s7, 16
@@ -13714,7 +13354,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: s_lshr_b32 s77, s18, 16
; GFX9-NEXT: s_lshr_b32 s78, s17, 16
; GFX9-NEXT: s_lshr_b32 s79, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
+; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v23, s6, 1.0
; GFX9-NEXT: v_add_f32_e64 v22, s7, 1.0
@@ -13764,34 +13404,8 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX9-NEXT: s_branch .LBB29_5
+; GFX9-NEXT: s_branch .LBB29_4
; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: s_branch .LBB29_2
-; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -13840,7 +13454,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v26, s42
; GFX9-NEXT: v_mov_b32_e32 v25, s41
; GFX9-NEXT: v_mov_b32_e32 v24, s40
-; GFX9-NEXT: .LBB29_5: ; %end
+; GFX9-NEXT: .LBB29_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -13903,7 +13517,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s9, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s10, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s10, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s4, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s5, 16
@@ -13930,7 +13544,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s73, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s74, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s10
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f32_e64 v23, s4, 1.0
; GFX11-TRUE16-NEXT: v_add_f32_e64 v22, s5, 1.0
@@ -13980,34 +13594,8 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB29_5
+; GFX11-TRUE16-NEXT: s_branch .LBB29_4
; GFX11-TRUE16-NEXT: .LBB29_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-TRUE16-NEXT: s_branch .LBB29_2
-; GFX11-TRUE16-NEXT: .LBB29_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -14032,7 +13620,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
-; GFX11-TRUE16-NEXT: .LBB29_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB29_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
@@ -14084,7 +13672,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s9, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s10, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s10, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s4, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s5, 16
@@ -14111,7 +13699,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s73, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s74, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s10
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, s4, 1.0
; GFX11-FAKE16-NEXT: v_add_f32_e64 v20, s5, 1.0
@@ -14161,34 +13749,8 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v1
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v2
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v3
-; GFX11-FAKE16-NEXT: s_branch .LBB29_5
+; GFX11-FAKE16-NEXT: s_branch .LBB29_4
; GFX11-FAKE16-NEXT: .LBB29_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-FAKE16-NEXT: s_branch .LBB29_2
-; GFX11-FAKE16-NEXT: .LBB29_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s0 :: v_dual_mov_b32 v2, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v0, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s16 :: v_dual_mov_b32 v7, s17
@@ -14213,7 +13775,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
-; GFX11-FAKE16-NEXT: .LBB29_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB29_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v64, 0xffff, v1
@@ -15395,7 +14957,7 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; SI-NEXT: s_lshr_b32 s93, s94, 16
; SI-NEXT: v_readfirstlane_b32 s4, v10
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s71, 16
@@ -15645,9 +15207,6 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v48i16_to_v24f32_scalar:
; VI: ; %bb.0:
@@ -15719,7 +15278,7 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s31, s34, 16
; VI-NEXT: v_readfirstlane_b32 s4, v10
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s83, 16
@@ -15973,9 +15532,6 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v48i16_to_v24f32_scalar:
; GFX9: ; %bb.0:
@@ -16055,9 +15611,9 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s57, s57, s62
; GFX9-NEXT: s_pack_ll_b32_b16 s58, s58, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s59, s59, s60
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
+; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -16083,10 +15639,8 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v21, s57, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v22, s58, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v23, s59, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB31_5
+; GFX9-NEXT: s_branch .LBB31_4
; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
-; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -16119,7 +15673,7 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB31_5: ; %end
+; GFX9-NEXT: .LBB31_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -16198,10 +15752,10 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s21, s58, s63
; GFX11-NEXT: s_pack_ll_b32_b16 s22, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s23, s44, s57
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-NEXT: .LBB31_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -16229,8 +15783,6 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v23, s23, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
-; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -16939,7 +16491,7 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; SI-NEXT: s_cmp_lg_u32 s12, 0
; SI-NEXT: v_readfirstlane_b32 s12, v0
; SI-NEXT: v_writelane_b32 v40, s35, 3
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s35, s5, 16
; SI-NEXT: s_lshr_b32 s34, s7, 16
@@ -16965,7 +16517,7 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[72:73], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[74:75], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[76:77], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB33_4
+; SI-NEXT: s_cbranch_execnz .LBB33_3
; SI-NEXT: .LBB33_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v23, s5, 1.0
; SI-NEXT: v_add_f32_e64 v22, s4, 1.0
@@ -17015,34 +16567,8 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v1
-; SI-NEXT: s_branch .LBB33_5
+; SI-NEXT: s_branch .LBB33_4
; SI-NEXT: .LBB33_3:
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: s_branch .LBB33_2
-; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -17091,7 +16617,7 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v33, s72
; SI-NEXT: v_mov_b32_e32 v34, s74
; SI-NEXT: v_mov_b32_e32 v35, s76
-; SI-NEXT: .LBB33_5: ; %end
+; SI-NEXT: .LBB33_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v35
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v34
@@ -17189,7 +16715,7 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s14, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s15, v0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s40, s6, 16
; VI-NEXT: s_lshr_b32 s41, s7, 16
@@ -17215,7 +16741,7 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; VI-NEXT: s_lshr_b32 s77, s18, 16
; VI-NEXT: s_lshr_b32 s78, s17, 16
; VI-NEXT: s_lshr_b32 s79, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB33_4
+; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v23, s6, 1.0
; VI-NEXT: v_add_f32_e64 v22, s7, 1.0
@@ -17265,34 +16791,8 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v53, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v54, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; VI-NEXT: s_branch .LBB33_5
+; VI-NEXT: s_branch .LBB33_4
; VI-NEXT: .LBB33_3:
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: s_branch .LBB33_2
-; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -17341,7 +16841,7 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v26, s42
; VI-NEXT: v_mov_b32_e32 v25, s41
; VI-NEXT: v_mov_b32_e32 v24, s40
-; VI-NEXT: .LBB33_5: ; %end
+; VI-NEXT: .LBB33_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v55, 16, v55
; VI-NEXT: v_lshlrev_b32_e32 v54, 16, v54
; VI-NEXT: v_lshlrev_b32_e32 v53, 16, v53
@@ -17407,7 +16907,7 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s14, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s15, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s40, s6, 16
; GFX9-NEXT: s_lshr_b32 s41, s7, 16
@@ -17433,7 +16933,7 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: s_lshr_b32 s77, s18, 16
; GFX9-NEXT: s_lshr_b32 s78, s17, 16
; GFX9-NEXT: s_lshr_b32 s79, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
+; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v23, s6, 1.0
; GFX9-NEXT: v_add_f32_e64 v22, s7, 1.0
@@ -17483,34 +16983,8 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX9-NEXT: s_branch .LBB33_5
+; GFX9-NEXT: s_branch .LBB33_4
; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: s_branch .LBB33_2
-; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -17559,7 +17033,7 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v26, s42
; GFX9-NEXT: v_mov_b32_e32 v25, s41
; GFX9-NEXT: v_mov_b32_e32 v24, s40
-; GFX9-NEXT: .LBB33_5: ; %end
+; GFX9-NEXT: .LBB33_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -17622,7 +17096,7 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s9, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s10, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s10, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s4, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s5, 16
@@ -17649,7 +17123,7 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s73, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s74, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s10
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f32_e64 v23, s4, 1.0
; GFX11-TRUE16-NEXT: v_add_f32_e64 v22, s5, 1.0
@@ -17699,34 +17173,8 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB33_5
+; GFX11-TRUE16-NEXT: s_branch .LBB33_4
; GFX11-TRUE16-NEXT: .LBB33_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-TRUE16-NEXT: s_branch .LBB33_2
-; GFX11-TRUE16-NEXT: .LBB33_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -17751,7 +17199,7 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
-; GFX11-TRUE16-NEXT: .LBB33_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB33_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
@@ -17803,7 +17251,7 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s9, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s10, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s10, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s4, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s5, 16
@@ -17830,7 +17278,7 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s73, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s74, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s10
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, s4, 1.0
; GFX11-FAKE16-NEXT: v_add_f32_e64 v20, s5, 1.0
@@ -17880,34 +17328,8 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v1
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v2
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v3
-; GFX11-FAKE16-NEXT: s_branch .LBB33_5
+; GFX11-FAKE16-NEXT: s_branch .LBB33_4
; GFX11-FAKE16-NEXT: .LBB33_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-FAKE16-NEXT: s_branch .LBB33_2
-; GFX11-FAKE16-NEXT: .LBB33_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s0 :: v_dual_mov_b32 v2, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v0, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s16 :: v_dual_mov_b32 v7, s17
@@ -17932,7 +17354,7 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
-; GFX11-FAKE16-NEXT: .LBB33_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB33_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v64, 0xffff, v1
@@ -19226,7 +18648,7 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; SI-NEXT: s_lshr_b32 s91, s89, 16
; SI-NEXT: v_readfirstlane_b32 s4, v10
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB35_3
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s71, 16
@@ -19300,7 +18722,7 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s59, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB35_4
+; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s71
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -19494,11 +18916,8 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v22, v23, v22
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v24
; SI-NEXT: v_or_b32_e32 v23, v25, v23
-; SI-NEXT: s_branch .LBB35_5
+; SI-NEXT: s_branch .LBB35_4
; SI-NEXT: .LBB35_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB35_2
-; SI-NEXT: .LBB35_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -19531,7 +18950,7 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB35_5: ; %end
+; SI-NEXT: .LBB35_4: ; %end
; SI-NEXT: v_readlane_b32 s71, v32, 23
; SI-NEXT: v_readlane_b32 s70, v32, 22
; SI-NEXT: v_readlane_b32 s69, v32, 21
@@ -19632,7 +19051,7 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; VI-NEXT: s_lshr_b32 s35, s34, 16
; VI-NEXT: v_readfirstlane_b32 s4, v10
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s83, 16
@@ -19706,7 +19125,7 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s59, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB35_4
+; VI-NEXT: s_cbranch_execnz .LBB35_3
; VI-NEXT: .LBB35_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v23, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s83
@@ -19805,11 +19224,8 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; VI-NEXT: v_add_f16_sdwa v24, v24, v23 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v23, s6, v23
; VI-NEXT: v_or_b32_e32 v23, v23, v24
-; VI-NEXT: s_branch .LBB35_5
+; VI-NEXT: s_branch .LBB35_4
; VI-NEXT: .LBB35_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB35_2
-; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -19842,7 +19258,7 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB35_5: ; %end
+; VI-NEXT: .LBB35_4: ; %end
; VI-NEXT: v_readlane_b32 s83, v32, 27
; VI-NEXT: v_readlane_b32 s82, v32, 26
; VI-NEXT: v_readlane_b32 s81, v32, 25
@@ -19955,9 +19371,9 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s57, s57, s62
; GFX9-NEXT: s_pack_ll_b32_b16 s58, s58, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s59, s59, s60
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
+; GFX9-NEXT: s_cbranch_execnz .LBB35_3
; GFX9-NEXT: .LBB35_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v23, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v23 op_sel_hi:[1,0]
@@ -19984,10 +19400,8 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v21, s57, v23 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v22, s58, v23 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v23, s59, v23 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB35_5
+; GFX9-NEXT: s_branch .LBB35_4
; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
-; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -20020,7 +19434,7 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB35_5: ; %end
+; GFX9-NEXT: .LBB35_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -20099,10 +19513,10 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s21, s58, s63
; GFX11-NEXT: s_pack_ll_b32_b16 s22, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s23, s44, s57
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-NEXT: .LBB35_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -20130,8 +19544,6 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v23, 0x200, s23 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
-; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -20354,7 +19766,7 @@ define inreg <12 x double> @bitcast_v12i64_to_v12f64_scalar(<12 x i64> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s14, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s15, v0
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB37_3
; SI-NEXT: .LBB37_2: ; %cmp.true
@@ -20408,8 +19820,6 @@ define inreg <12 x double> @bitcast_v12i64_to_v12f64_scalar(<12 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v22, s7
; SI-NEXT: v_mov_b32_e32 v23, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v12i64_to_v12f64_scalar:
; VI: ; %bb.0:
@@ -20426,7 +19836,7 @@ define inreg <12 x double> @bitcast_v12i64_to_v12f64_scalar(<12 x i64> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s14, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s15, v0
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB37_3
; VI-NEXT: .LBB37_2: ; %cmp.true
@@ -20480,8 +19890,6 @@ define inreg <12 x double> @bitcast_v12i64_to_v12f64_scalar(<12 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v22, s7
; VI-NEXT: v_mov_b32_e32 v23, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v12i64_to_v12f64_scalar:
; GFX9: ; %bb.0:
@@ -20498,7 +19906,7 @@ define inreg <12 x double> @bitcast_v12i64_to_v12f64_scalar(<12 x i64> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s14, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s15, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB37_3
; GFX9-NEXT: .LBB37_2: ; %cmp.true
@@ -20552,8 +19960,6 @@ define inreg <12 x double> @bitcast_v12i64_to_v12f64_scalar(<12 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v22, s7
; GFX9-NEXT: v_mov_b32_e32 v23, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v12i64_to_v12f64_scalar:
; GFX11: ; %bb.0:
@@ -20567,7 +19973,7 @@ define inreg <12 x double> @bitcast_v12i64_to_v12f64_scalar(<12 x i64> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s9, v0
; GFX11-NEXT: s_cmp_lg_u32 s10, 0
; GFX11-NEXT: s_mov_b32 s10, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s10
; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
@@ -20610,8 +20016,6 @@ define inreg <12 x double> @bitcast_v12i64_to_v12f64_scalar(<12 x i64> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v20, s7 :: v_dual_mov_b32 v21, s6
; GFX11-NEXT: v_dual_mov_b32 v22, s5 :: v_dual_mov_b32 v23, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_4:
-; GFX11-NEXT: s_branch .LBB37_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -20793,9 +20197,9 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB39_3
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_4
+; SI-NEXT: s_cbranch_execnz .LBB39_3
; SI-NEXT: .LBB39_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -20809,10 +20213,8 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; SI-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
; SI-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
; SI-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
-; SI-NEXT: s_branch .LBB39_5
+; SI-NEXT: s_branch .LBB39_4
; SI-NEXT: .LBB39_3:
-; SI-NEXT: s_branch .LBB39_2
-; SI-NEXT: .LBB39_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -20845,7 +20247,7 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB39_5: ; %end
+; SI-NEXT: .LBB39_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -20908,9 +20310,9 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB39_3
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_4
+; VI-NEXT: s_cbranch_execnz .LBB39_3
; VI-NEXT: .LBB39_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -20924,10 +20326,8 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; VI-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
; VI-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
; VI-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
-; VI-NEXT: s_branch .LBB39_5
+; VI-NEXT: s_branch .LBB39_4
; VI-NEXT: .LBB39_3:
-; VI-NEXT: s_branch .LBB39_2
-; VI-NEXT: .LBB39_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -20960,7 +20360,7 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB39_5: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -21023,9 +20423,9 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
+; GFX9-NEXT: s_cbranch_execnz .LBB39_3
; GFX9-NEXT: .LBB39_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -21039,10 +20439,8 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
; GFX9-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
; GFX9-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
-; GFX9-NEXT: s_branch .LBB39_5
+; GFX9-NEXT: s_branch .LBB39_4
; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
-; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -21075,7 +20473,7 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB39_5: ; %end
+; GFX9-NEXT: .LBB39_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -21139,10 +20537,10 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_3
; GFX11-NEXT: .LBB39_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -21156,10 +20554,8 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[18:19], s[54:55], 1.0
; GFX11-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
; GFX11-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
-; GFX11-NEXT: s_branch .LBB39_5
+; GFX11-NEXT: s_branch .LBB39_4
; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
-; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -21176,7 +20572,7 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB39_5: ; %end
+; GFX11-NEXT: .LBB39_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -21921,7 +21317,7 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; SI-NEXT: s_cmp_lg_u32 s12, 0
; SI-NEXT: v_readfirstlane_b32 s12, v0
; SI-NEXT: v_writelane_b32 v24, s35, 3
-; SI-NEXT: s_cbranch_scc0 .LBB41_4
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s88, s5, 16
; SI-NEXT: s_lshr_b32 s89, s7, 16
@@ -22103,32 +21499,6 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB41_4:
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: s_branch .LBB41_2
;
; VI-LABEL: bitcast_v12i64_to_v48i16_scalar:
; VI: ; %bb.0:
@@ -22145,7 +21515,7 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s14, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s15, v0
-; VI-NEXT: s_cbranch_scc0 .LBB41_4
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s40, s6, 16
; VI-NEXT: s_lshr_b32 s41, s7, 16
@@ -22319,32 +21689,6 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v22, s7
; VI-NEXT: v_mov_b32_e32 v23, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_4:
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: s_branch .LBB41_2
;
; GFX9-LABEL: bitcast_v12i64_to_v48i16_scalar:
; GFX9: ; %bb.0:
@@ -22361,7 +21705,7 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s14, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s15, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s40, s6, 16
; GFX9-NEXT: s_lshr_b32 s41, s7, 16
@@ -22487,32 +21831,6 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v22, s7
; GFX9-NEXT: v_mov_b32_e32 v23, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_4:
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: s_branch .LBB41_2
;
; GFX11-LABEL: bitcast_v12i64_to_v48i16_scalar:
; GFX11: ; %bb.0:
@@ -22526,7 +21844,7 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s9, v0
; GFX11-NEXT: s_cmp_lg_u32 s10, 0
; GFX11-NEXT: s_mov_b32 s74, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s10, s4, 16
; GFX11-NEXT: s_lshr_b32 s11, s5, 16
@@ -22642,32 +21960,6 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v20, s7 :: v_dual_mov_b32 v21, s6
; GFX11-NEXT: v_dual_mov_b32 v22, s5 :: v_dual_mov_b32 v23, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_4:
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: s_branch .LBB41_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -23800,7 +23092,7 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s93, s94, 16
; SI-NEXT: v_readfirstlane_b32 s4, v10
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB43_4
+; SI-NEXT: s_cbranch_scc0 .LBB43_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s71, 16
@@ -24050,9 +23342,6 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB43_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB43_2
;
; VI-LABEL: bitcast_v48i16_to_v12i64_scalar:
; VI: ; %bb.0:
@@ -24124,7 +23413,7 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s31, s34, 16
; VI-NEXT: v_readfirstlane_b32 s4, v10
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB43_4
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s83, 16
@@ -24378,9 +23667,6 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB43_2
;
; GFX9-LABEL: bitcast_v48i16_to_v12i64_scalar:
; GFX9: ; %bb.0:
@@ -24460,9 +23746,9 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s57, s57, s62
; GFX9-NEXT: s_pack_ll_b32_b16 s58, s58, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s59, s59, s60
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
+; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -24488,10 +23774,8 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v21, s57, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v22, s58, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v23, s59, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB43_5
+; GFX9-NEXT: s_branch .LBB43_4
; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
-; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -24524,7 +23808,7 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB43_5: ; %end
+; GFX9-NEXT: .LBB43_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -24603,10 +23887,10 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s21, s58, s63
; GFX11-NEXT: s_pack_ll_b32_b16 s22, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s23, s44, s57
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-NEXT: .LBB43_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -24634,8 +23918,6 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v23, s23, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
-; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -25380,7 +24662,7 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; SI-NEXT: s_cmp_lg_u32 s12, 0
; SI-NEXT: v_readfirstlane_b32 s12, v0
; SI-NEXT: v_writelane_b32 v24, s35, 3
-; SI-NEXT: s_cbranch_scc0 .LBB45_4
+; SI-NEXT: s_cbranch_scc0 .LBB45_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s88, s5, 16
; SI-NEXT: s_lshr_b32 s89, s7, 16
@@ -25562,32 +24844,6 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB45_4:
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: s_branch .LBB45_2
;
; VI-LABEL: bitcast_v12i64_to_v48f16_scalar:
; VI: ; %bb.0:
@@ -25604,7 +24860,7 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s14, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s15, v0
-; VI-NEXT: s_cbranch_scc0 .LBB45_4
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s40, s6, 16
; VI-NEXT: s_lshr_b32 s41, s7, 16
@@ -25778,32 +25034,6 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v22, s7
; VI-NEXT: v_mov_b32_e32 v23, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_4:
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: s_branch .LBB45_2
;
; GFX9-LABEL: bitcast_v12i64_to_v48f16_scalar:
; GFX9: ; %bb.0:
@@ -25820,7 +25050,7 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s14, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s15, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s40, s6, 16
; GFX9-NEXT: s_lshr_b32 s41, s7, 16
@@ -25946,32 +25176,6 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v22, s7
; GFX9-NEXT: v_mov_b32_e32 v23, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_4:
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: s_branch .LBB45_2
;
; GFX11-LABEL: bitcast_v12i64_to_v48f16_scalar:
; GFX11: ; %bb.0:
@@ -25985,7 +25189,7 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s9, v0
; GFX11-NEXT: s_cmp_lg_u32 s10, 0
; GFX11-NEXT: s_mov_b32 s74, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s10, s4, 16
; GFX11-NEXT: s_lshr_b32 s11, s5, 16
@@ -26101,32 +25305,6 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v20, s7 :: v_dual_mov_b32 v21, s6
; GFX11-NEXT: v_dual_mov_b32 v22, s5 :: v_dual_mov_b32 v23, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_4:
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: s_branch .LBB45_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -27371,7 +26549,7 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s91, s89, 16
; SI-NEXT: v_readfirstlane_b32 s4, v10
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB47_3
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s71, 16
@@ -27445,7 +26623,7 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s59, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB47_4
+; SI-NEXT: s_cbranch_execnz .LBB47_3
; SI-NEXT: .LBB47_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s71
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -27639,11 +26817,8 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v22, v23, v22
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v24
; SI-NEXT: v_or_b32_e32 v23, v25, v23
-; SI-NEXT: s_branch .LBB47_5
+; SI-NEXT: s_branch .LBB47_4
; SI-NEXT: .LBB47_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB47_2
-; SI-NEXT: .LBB47_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -27676,7 +26851,7 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB47_5: ; %end
+; SI-NEXT: .LBB47_4: ; %end
; SI-NEXT: v_readlane_b32 s71, v32, 23
; SI-NEXT: v_readlane_b32 s70, v32, 22
; SI-NEXT: v_readlane_b32 s69, v32, 21
@@ -27777,7 +26952,7 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s35, s34, 16
; VI-NEXT: v_readfirstlane_b32 s4, v10
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s83, 16
@@ -27851,7 +27026,7 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s59, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB47_4
+; VI-NEXT: s_cbranch_execnz .LBB47_3
; VI-NEXT: .LBB47_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v23, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s83
@@ -27950,11 +27125,8 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; VI-NEXT: v_add_f16_sdwa v24, v24, v23 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v23, s6, v23
; VI-NEXT: v_or_b32_e32 v23, v23, v24
-; VI-NEXT: s_branch .LBB47_5
+; VI-NEXT: s_branch .LBB47_4
; VI-NEXT: .LBB47_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB47_2
-; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -27987,7 +27159,7 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB47_5: ; %end
+; VI-NEXT: .LBB47_4: ; %end
; VI-NEXT: v_readlane_b32 s83, v32, 27
; VI-NEXT: v_readlane_b32 s82, v32, 26
; VI-NEXT: v_readlane_b32 s81, v32, 25
@@ -28100,9 +27272,9 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s57, s57, s62
; GFX9-NEXT: s_pack_ll_b32_b16 s58, s58, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s59, s59, s60
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
+; GFX9-NEXT: s_cbranch_execnz .LBB47_3
; GFX9-NEXT: .LBB47_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v23, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v23 op_sel_hi:[1,0]
@@ -28129,10 +27301,8 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v21, s57, v23 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v22, s58, v23 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v23, s59, v23 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB47_5
+; GFX9-NEXT: s_branch .LBB47_4
; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
-; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -28165,7 +27335,7 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB47_5: ; %end
+; GFX9-NEXT: .LBB47_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -28244,10 +27414,10 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s21, s58, s63
; GFX11-NEXT: s_pack_ll_b32_b16 s22, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s23, s44, s57
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-NEXT: .LBB47_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -28275,8 +27445,6 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v23, 0x200, s23 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -28949,7 +28117,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: v_writelane_b32 v40, s35, 3
-; SI-NEXT: s_cbranch_scc0 .LBB49_3
+; SI-NEXT: s_cbranch_scc0 .LBB49_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s35, s13, 16
; SI-NEXT: s_lshr_b32 s34, s11, 16
@@ -28975,7 +28143,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; SI-NEXT: s_lshr_b64 s[72:73], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[74:75], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[76:77], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB49_4
+; SI-NEXT: s_cbranch_execnz .LBB49_3
; SI-NEXT: .LBB49_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[22:23], s[12:13], 1.0
; SI-NEXT: v_add_f64 v[20:21], s[10:11], 1.0
@@ -29013,34 +28181,8 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v1
-; SI-NEXT: s_branch .LBB49_5
+; SI-NEXT: s_branch .LBB49_4
; SI-NEXT: .LBB49_3:
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: s_branch .LBB49_2
-; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -29089,7 +28231,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v26, s42
; SI-NEXT: v_mov_b32_e32 v25, s40
; SI-NEXT: v_mov_b32_e32 v24, s14
-; SI-NEXT: .LBB49_5: ; %end
+; SI-NEXT: .LBB49_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v35
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -29187,7 +28329,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s5, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s4, v0
-; VI-NEXT: s_cbranch_scc0 .LBB49_3
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s40, s9, 16
; VI-NEXT: s_lshr_b32 s60, s8, 16
@@ -29213,7 +28355,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; VI-NEXT: s_lshr_b32 s78, s18, 16
; VI-NEXT: s_lshr_b32 s59, s17, 16
; VI-NEXT: s_lshr_b32 s79, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB49_4
+; VI-NEXT: s_cbranch_execnz .LBB49_3
; VI-NEXT: .LBB49_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[22:23], s[8:9], 1.0
; VI-NEXT: v_add_f64 v[20:21], s[10:11], 1.0
@@ -29251,34 +28393,8 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v53, 16, v0
-; VI-NEXT: s_branch .LBB49_5
+; VI-NEXT: s_branch .LBB49_4
; VI-NEXT: .LBB49_3:
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: s_branch .LBB49_2
-; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -29327,7 +28443,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v30, s42
; VI-NEXT: v_mov_b32_e32 v28, s41
; VI-NEXT: v_mov_b32_e32 v26, s40
-; VI-NEXT: .LBB49_5: ; %end
+; VI-NEXT: .LBB49_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v53, 16, v53
; VI-NEXT: v_lshlrev_b32_e32 v51, 16, v51
; VI-NEXT: v_lshlrev_b32_e32 v49, 16, v49
@@ -29393,7 +28509,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s5, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s40, s9, 16
; GFX9-NEXT: s_lshr_b32 s60, s8, 16
@@ -29419,7 +28535,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX9-NEXT: s_lshr_b32 s78, s18, 16
; GFX9-NEXT: s_lshr_b32 s59, s17, 16
; GFX9-NEXT: s_lshr_b32 s79, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
+; GFX9-NEXT: s_cbranch_execnz .LBB49_3
; GFX9-NEXT: .LBB49_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[22:23], s[8:9], 1.0
; GFX9-NEXT: v_add_f64 v[20:21], s[10:11], 1.0
@@ -29457,34 +28573,8 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v0
-; GFX9-NEXT: s_branch .LBB49_5
+; GFX9-NEXT: s_branch .LBB49_4
; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: s_branch .LBB49_2
-; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -29533,7 +28623,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v30, s42
; GFX9-NEXT: v_mov_b32_e32 v28, s41
; GFX9-NEXT: v_mov_b32_e32 v26, s40
-; GFX9-NEXT: .LBB49_5: ; %end
+; GFX9-NEXT: .LBB49_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -29596,7 +28686,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s8, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s10, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s10, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s7, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s47, s6, 16
@@ -29623,7 +28713,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s74, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s10
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], s[6:7], 1.0
; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], s[4:5], 1.0
@@ -29661,34 +28751,8 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB49_5
+; GFX11-TRUE16-NEXT: s_branch .LBB49_4
; GFX11-TRUE16-NEXT: .LBB49_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-TRUE16-NEXT: s_branch .LBB49_2
-; GFX11-TRUE16-NEXT: .LBB49_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -29713,7 +28777,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s40 :: v_dual_mov_b32 v33, s15
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s14 :: v_dual_mov_b32 v29, s13
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, s12 :: v_dual_mov_b32 v26, s11
-; GFX11-TRUE16-NEXT: .LBB49_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB49_4: ; %end
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, v38 :: v_dual_mov_b32 v35, v35
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v28 :: v_dual_mov_b32 v53, v53
@@ -29767,7 +28831,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s8, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s10, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s10, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s7, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s47, s6, 16
@@ -29794,7 +28858,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s74, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s10
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f64 v[19:20], s[6:7], 1.0
; GFX11-FAKE16-NEXT: v_add_f64 v[21:22], s[4:5], 1.0
@@ -29832,34 +28896,8 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v0
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v3
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-FAKE16-NEXT: s_branch .LBB49_5
+; GFX11-FAKE16-NEXT: s_branch .LBB49_4
; GFX11-FAKE16-NEXT: .LBB49_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-FAKE16-NEXT: s_branch .LBB49_2
-; GFX11-FAKE16-NEXT: .LBB49_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v25, s20
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v11, s22
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s16 :: v_dual_mov_b32 v9, s24
@@ -29884,7 +28922,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v38, s41 :: v_dual_mov_b32 v35, s15
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s14 :: v_dual_mov_b32 v31, s13
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s12 :: v_dual_mov_b32 v29, s11
-; GFX11-FAKE16-NEXT: .LBB49_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB49_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v64, 0xffff, v0
@@ -31066,7 +30104,7 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; SI-NEXT: s_lshr_b32 s93, s94, 16
; SI-NEXT: v_readfirstlane_b32 s4, v10
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB51_4
+; SI-NEXT: s_cbranch_scc0 .LBB51_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s71, 16
@@ -31316,9 +30354,6 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB51_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB51_2
;
; VI-LABEL: bitcast_v48i16_to_v12f64_scalar:
; VI: ; %bb.0:
@@ -31390,7 +30425,7 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s31, s34, 16
; VI-NEXT: v_readfirstlane_b32 s4, v10
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB51_4
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s83, 16
@@ -31644,9 +30679,6 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB51_2
;
; GFX9-LABEL: bitcast_v48i16_to_v12f64_scalar:
; GFX9: ; %bb.0:
@@ -31726,9 +30758,9 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s57, s57, s62
; GFX9-NEXT: s_pack_ll_b32_b16 s58, s58, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s59, s59, s60
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_4
+; GFX9-NEXT: s_cbranch_execnz .LBB51_3
; GFX9-NEXT: .LBB51_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -31754,10 +30786,8 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v21, s57, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v22, s58, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v23, s59, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB51_5
+; GFX9-NEXT: s_branch .LBB51_4
; GFX9-NEXT: .LBB51_3:
-; GFX9-NEXT: s_branch .LBB51_2
-; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -31790,7 +30820,7 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB51_5: ; %end
+; GFX9-NEXT: .LBB51_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -31869,10 +30899,10 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s21, s58, s63
; GFX11-NEXT: s_pack_ll_b32_b16 s22, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s23, s44, s57
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-NEXT: .LBB51_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -31900,8 +30930,6 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v23, s23, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
-; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -32574,7 +31602,7 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: v_writelane_b32 v40, s35, 3
-; SI-NEXT: s_cbranch_scc0 .LBB53_3
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s35, s13, 16
; SI-NEXT: s_lshr_b32 s34, s11, 16
@@ -32600,7 +31628,7 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; SI-NEXT: s_lshr_b64 s[72:73], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[74:75], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[76:77], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB53_4
+; SI-NEXT: s_cbranch_execnz .LBB53_3
; SI-NEXT: .LBB53_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[22:23], s[12:13], 1.0
; SI-NEXT: v_add_f64 v[20:21], s[10:11], 1.0
@@ -32638,34 +31666,8 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v1
-; SI-NEXT: s_branch .LBB53_5
+; SI-NEXT: s_branch .LBB53_4
; SI-NEXT: .LBB53_3:
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr91
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: s_branch .LBB53_2
-; SI-NEXT: .LBB53_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -32714,7 +31716,7 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; SI-NEXT: v_mov_b32_e32 v26, s42
; SI-NEXT: v_mov_b32_e32 v25, s40
; SI-NEXT: v_mov_b32_e32 v24, s14
-; SI-NEXT: .LBB53_5: ; %end
+; SI-NEXT: .LBB53_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v35
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -32812,7 +31814,7 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; VI-NEXT: v_readfirstlane_b32 s5, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s4, v0
-; VI-NEXT: s_cbranch_scc0 .LBB53_3
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s40, s9, 16
; VI-NEXT: s_lshr_b32 s60, s8, 16
@@ -32838,7 +31840,7 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; VI-NEXT: s_lshr_b32 s78, s18, 16
; VI-NEXT: s_lshr_b32 s59, s17, 16
; VI-NEXT: s_lshr_b32 s79, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB53_4
+; VI-NEXT: s_cbranch_execnz .LBB53_3
; VI-NEXT: .LBB53_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[22:23], s[8:9], 1.0
; VI-NEXT: v_add_f64 v[20:21], s[10:11], 1.0
@@ -32876,34 +31878,8 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v53, 16, v0
-; VI-NEXT: s_branch .LBB53_5
+; VI-NEXT: s_branch .LBB53_4
; VI-NEXT: .LBB53_3:
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: s_branch .LBB53_2
-; VI-NEXT: .LBB53_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -32952,7 +31928,7 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; VI-NEXT: v_mov_b32_e32 v30, s42
; VI-NEXT: v_mov_b32_e32 v28, s41
; VI-NEXT: v_mov_b32_e32 v26, s40
-; VI-NEXT: .LBB53_5: ; %end
+; VI-NEXT: .LBB53_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v53, 16, v53
; VI-NEXT: v_lshlrev_b32_e32 v51, 16, v51
; VI-NEXT: v_lshlrev_b32_e32 v49, 16, v49
@@ -33018,7 +31994,7 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX9-NEXT: v_readfirstlane_b32 s5, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s40, s9, 16
; GFX9-NEXT: s_lshr_b32 s60, s8, 16
@@ -33044,7 +32020,7 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX9-NEXT: s_lshr_b32 s78, s18, 16
; GFX9-NEXT: s_lshr_b32 s59, s17, 16
; GFX9-NEXT: s_lshr_b32 s79, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB53_4
+; GFX9-NEXT: s_cbranch_execnz .LBB53_3
; GFX9-NEXT: .LBB53_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[22:23], s[8:9], 1.0
; GFX9-NEXT: v_add_f64 v[20:21], s[10:11], 1.0
@@ -33082,34 +32058,8 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v0
-; GFX9-NEXT: s_branch .LBB53_5
+; GFX9-NEXT: s_branch .LBB53_4
; GFX9-NEXT: .LBB53_3:
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr41
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: s_branch .LBB53_2
-; GFX9-NEXT: .LBB53_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -33158,7 +32108,7 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v30, s42
; GFX9-NEXT: v_mov_b32_e32 v28, s41
; GFX9-NEXT: v_mov_b32_e32 v26, s40
-; GFX9-NEXT: .LBB53_5: ; %end
+; GFX9-NEXT: .LBB53_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -33221,7 +32171,7 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s8, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s10, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s10, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s7, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s47, s6, 16
@@ -33248,7 +32198,7 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s74, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s10
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], s[6:7], 1.0
; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], s[4:5], 1.0
@@ -33286,34 +32236,8 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB53_5
+; GFX11-TRUE16-NEXT: s_branch .LBB53_4
; GFX11-TRUE16-NEXT: .LBB53_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-TRUE16-NEXT: s_branch .LBB53_2
-; GFX11-TRUE16-NEXT: .LBB53_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -33338,7 +32262,7 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s40 :: v_dual_mov_b32 v33, s15
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s14 :: v_dual_mov_b32 v29, s13
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, s12 :: v_dual_mov_b32 v26, s11
-; GFX11-TRUE16-NEXT: .LBB53_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB53_4: ; %end
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, v38 :: v_dual_mov_b32 v35, v35
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v28 :: v_dual_mov_b32 v53, v53
@@ -33392,7 +32316,7 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s8, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s10, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s10, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s7, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s47, s6, 16
@@ -33419,7 +32343,7 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s74, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s10
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f64 v[19:20], s[6:7], 1.0
; GFX11-FAKE16-NEXT: v_add_f64 v[21:22], s[4:5], 1.0
@@ -33457,34 +32381,8 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v0
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v3
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-FAKE16-NEXT: s_branch .LBB53_5
+; GFX11-FAKE16-NEXT: s_branch .LBB53_4
; GFX11-FAKE16-NEXT: .LBB53_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-FAKE16-NEXT: s_branch .LBB53_2
-; GFX11-FAKE16-NEXT: .LBB53_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v25, s20
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v11, s22
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s16 :: v_dual_mov_b32 v9, s24
@@ -33509,7 +32407,7 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v38, s41 :: v_dual_mov_b32 v35, s15
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s14 :: v_dual_mov_b32 v31, s13
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s12 :: v_dual_mov_b32 v29, s11
-; GFX11-FAKE16-NEXT: .LBB53_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB53_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v64, 0xffff, v0
@@ -34803,7 +33701,7 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; SI-NEXT: s_lshr_b32 s91, s89, 16
; SI-NEXT: v_readfirstlane_b32 s4, v10
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s71, 16
@@ -34877,7 +33775,7 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s59, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB55_4
+; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s71
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -35071,11 +33969,8 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; SI-NEXT: v_or_b32_e32 v22, v23, v22
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v24
; SI-NEXT: v_or_b32_e32 v23, v25, v23
-; SI-NEXT: s_branch .LBB55_5
+; SI-NEXT: s_branch .LBB55_4
; SI-NEXT: .LBB55_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB55_2
-; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -35108,7 +34003,7 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB55_5: ; %end
+; SI-NEXT: .LBB55_4: ; %end
; SI-NEXT: v_readlane_b32 s71, v32, 23
; SI-NEXT: v_readlane_b32 s70, v32, 22
; SI-NEXT: v_readlane_b32 s69, v32, 21
@@ -35209,7 +34104,7 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; VI-NEXT: s_lshr_b32 s35, s34, 16
; VI-NEXT: v_readfirstlane_b32 s4, v10
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s83, 16
@@ -35283,7 +34178,7 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s59, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB55_4
+; VI-NEXT: s_cbranch_execnz .LBB55_3
; VI-NEXT: .LBB55_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v23, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s83
@@ -35382,11 +34277,8 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; VI-NEXT: v_add_f16_sdwa v24, v24, v23 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v23, s6, v23
; VI-NEXT: v_or_b32_e32 v23, v23, v24
-; VI-NEXT: s_branch .LBB55_5
+; VI-NEXT: s_branch .LBB55_4
; VI-NEXT: .LBB55_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB55_2
-; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -35419,7 +34311,7 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB55_5: ; %end
+; VI-NEXT: .LBB55_4: ; %end
; VI-NEXT: v_readlane_b32 s83, v32, 27
; VI-NEXT: v_readlane_b32 s82, v32, 26
; VI-NEXT: v_readlane_b32 s81, v32, 25
@@ -35532,9 +34424,9 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX9-NEXT: s_pack_ll_b32_b16 s57, s57, s62
; GFX9-NEXT: s_pack_ll_b32_b16 s58, s58, s61
; GFX9-NEXT: s_pack_ll_b32_b16 s59, s59, s60
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
+; GFX9-NEXT: s_cbranch_execnz .LBB55_3
; GFX9-NEXT: .LBB55_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v23, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v23 op_sel_hi:[1,0]
@@ -35561,10 +34453,8 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX9-NEXT: v_pk_add_f16 v21, s57, v23 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v22, s58, v23 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v23, s59, v23 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB55_5
+; GFX9-NEXT: s_branch .LBB55_4
; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
-; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -35597,7 +34487,7 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB55_5: ; %end
+; GFX9-NEXT: .LBB55_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -35676,10 +34566,10 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX11-NEXT: s_pack_ll_b32_b16 s21, s58, s63
; GFX11-NEXT: s_pack_ll_b32_b16 s22, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s23, s44, s57
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-NEXT: .LBB55_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -35707,8 +34597,6 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX11-NEXT: v_pk_add_f16 v23, 0x200, s23 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
-; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -36918,7 +35806,7 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; SI-NEXT: s_lshr_b32 s80, s81, 16
; SI-NEXT: v_readfirstlane_b32 s4, v10
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB57_4
+; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s5, s17, 0xffff
; SI-NEXT: s_lshl_b32 s7, s36, 16
@@ -37304,32 +36192,6 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB57_4:
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: s_branch .LBB57_2
;
; VI-LABEL: bitcast_v48i16_to_v48f16_scalar:
; VI: ; %bb.0:
@@ -37370,7 +36232,7 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; VI-NEXT: s_lshr_b32 s73, s72, 16
; VI-NEXT: v_readfirstlane_b32 s4, v10
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB57_3
; VI-NEXT: .LBB57_2: ; %cmp.true
@@ -37520,8 +36382,6 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v22, s8
; VI-NEXT: v_mov_b32_e32 v23, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v48i16_to_v48f16_scalar:
; GFX9: ; %bb.0:
@@ -37562,9 +36422,9 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s44, s62, 16
; GFX9-NEXT: v_readfirstlane_b32 s4, v10
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
+; GFX9-NEXT: s_cbranch_execnz .LBB57_3
; GFX9-NEXT: .LBB57_2: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s79, s61
; GFX9-NEXT: v_pk_add_u16 v23, s4, 3 op_sel_hi:[1,0]
@@ -37638,10 +36498,8 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v21
; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v22
; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX9-NEXT: s_branch .LBB57_5
+; GFX9-NEXT: s_branch .LBB57_4
; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
-; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v23, s79
; GFX9-NEXT: v_mov_b32_e32 v22, s78
; GFX9-NEXT: v_mov_b32_e32 v21, s77
@@ -37690,7 +36548,7 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v53, s8
; GFX9-NEXT: v_mov_b32_e32 v54, s7
; GFX9-NEXT: v_mov_b32_e32 v55, s6
-; GFX9-NEXT: .LBB57_5: ; %end
+; GFX9-NEXT: .LBB57_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -37777,10 +36635,10 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s60, 16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s74, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s74, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s74
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s57, s72, s57
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s47, s63, s47
@@ -37854,10 +36712,8 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX11-TRUE16-NEXT: s_branch .LBB57_5
+; GFX11-TRUE16-NEXT: s_branch .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB57_2
-; GFX11-TRUE16-NEXT: .LBB57_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, s72 :: v_dual_mov_b32 v22, s63
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, s73 :: v_dual_mov_b32 v20, s62
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, s61 :: v_dual_mov_b32 v18, s60
@@ -37882,7 +36738,7 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s9 :: v_dual_mov_b32 v51, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v52, s6 :: v_dual_mov_b32 v53, s8
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s4 :: v_dual_mov_b32 v55, s5
-; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB57_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
@@ -37958,10 +36814,10 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s60, 16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s74, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s74, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s74
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-FAKE16-NEXT: .LBB57_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s57, s72, s57
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s47, s63, s47
@@ -38035,10 +36891,8 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v20
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
-; GFX11-FAKE16-NEXT: s_branch .LBB57_5
+; GFX11-FAKE16-NEXT: s_branch .LBB57_4
; GFX11-FAKE16-NEXT: .LBB57_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB57_2
-; GFX11-FAKE16-NEXT: .LBB57_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, s72 :: v_dual_mov_b32 v20, s63
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s73 :: v_dual_mov_b32 v22, s62
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s61 :: v_dual_mov_b32 v14, s60
@@ -38063,7 +36917,7 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v50, s9 :: v_dual_mov_b32 v51, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s6 :: v_dual_mov_b32 v53, s8
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v54, s4 :: v_dual_mov_b32 v55, s5
-; GFX11-FAKE16-NEXT: .LBB57_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB57_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v64, 0xffff, v1
@@ -38962,9 +37816,9 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
-; SI-NEXT: s_cbranch_scc0 .LBB59_3
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_4
+; SI-NEXT: s_cbranch_execnz .LBB59_3
; SI-NEXT: .LBB59_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v1, s78
; SI-NEXT: v_cvt_f32_f16_e32 v2, s77
@@ -39186,10 +38040,8 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; SI-NEXT: v_lshr_b64 v[50:51], v[4:5], 16
; SI-NEXT: v_lshr_b64 v[48:49], v[6:7], 16
; SI-NEXT: v_lshr_b64 v[38:39], v[8:9], 16
-; SI-NEXT: s_branch .LBB59_5
+; SI-NEXT: s_branch .LBB59_4
; SI-NEXT: .LBB59_3:
-; SI-NEXT: s_branch .LBB59_2
-; SI-NEXT: .LBB59_4:
; SI-NEXT: v_mov_b32_e32 v25, s72
; SI-NEXT: s_waitcnt expcnt(6)
; SI-NEXT: v_mov_b32_e32 v57, s58
@@ -39245,7 +38097,7 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v28, s61
; SI-NEXT: v_mov_b32_e32 v26, s57
; SI-NEXT: v_mov_b32_e32 v24, s47
-; SI-NEXT: .LBB59_5: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v54
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v47
; SI-NEXT: v_or_b32_e32 v0, v2, v0
@@ -39380,9 +38232,9 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s79, s78, 16
; VI-NEXT: v_readfirstlane_b32 s4, v10
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
+; VI-NEXT: s_cbranch_execnz .LBB59_3
; VI-NEXT: .LBB59_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v24, 0x200
; VI-NEXT: v_add_f16_e32 v0, s16, v24
@@ -39433,10 +38285,8 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v25, s47, v24
; VI-NEXT: v_add_f16_e32 v23, s44, v24
; VI-NEXT: v_add_f16_e32 v24, s45, v24
-; VI-NEXT: s_branch .LBB59_5
+; VI-NEXT: s_branch .LBB59_4
; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
-; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v24, s45
; VI-NEXT: v_mov_b32_e32 v23, s44
; VI-NEXT: v_mov_b32_e32 v25, s47
@@ -39485,7 +38335,7 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v55, s43
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: .LBB59_5: ; %end
+; VI-NEXT: .LBB59_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v55, 16, v55
; VI-NEXT: v_lshlrev_b32_e32 v54, 16, v54
; VI-NEXT: v_lshlrev_b32_e32 v53, 16, v53
@@ -39575,9 +38425,9 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s44, s62, 16
; GFX9-NEXT: v_readfirstlane_b32 s4, v10
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
+; GFX9-NEXT: s_cbranch_execnz .LBB59_3
; GFX9-NEXT: .LBB59_2: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s79, s61
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
@@ -39652,10 +38502,8 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v21
; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v22
; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX9-NEXT: s_branch .LBB59_5
+; GFX9-NEXT: s_branch .LBB59_4
; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
-; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v23, s79
; GFX9-NEXT: v_mov_b32_e32 v22, s78
; GFX9-NEXT: v_mov_b32_e32 v21, s77
@@ -39704,7 +38552,7 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v53, s8
; GFX9-NEXT: v_mov_b32_e32 v54, s7
; GFX9-NEXT: v_mov_b32_e32 v55, s6
-; GFX9-NEXT: .LBB59_5: ; %end
+; GFX9-NEXT: .LBB59_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -39791,10 +38639,10 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s60, 16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s74, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s74, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s74
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s57, s72, s57
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s47, s63, s47
@@ -39868,10 +38716,8 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX11-TRUE16-NEXT: s_branch .LBB59_5
+; GFX11-TRUE16-NEXT: s_branch .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB59_2
-; GFX11-TRUE16-NEXT: .LBB59_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, s72 :: v_dual_mov_b32 v22, s63
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, s73 :: v_dual_mov_b32 v20, s62
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, s61 :: v_dual_mov_b32 v18, s60
@@ -39896,7 +38742,7 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s9 :: v_dual_mov_b32 v51, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v52, s6 :: v_dual_mov_b32 v53, s8
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s4 :: v_dual_mov_b32 v55, s5
-; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB59_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
@@ -39972,10 +38818,10 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s60, 16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s74, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s74, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s74
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-FAKE16-NEXT: .LBB59_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s57, s72, s57
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s47, s63, s47
@@ -40049,10 +38895,8 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v20
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
-; GFX11-FAKE16-NEXT: s_branch .LBB59_5
+; GFX11-FAKE16-NEXT: s_branch .LBB59_4
; GFX11-FAKE16-NEXT: .LBB59_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB59_2
-; GFX11-FAKE16-NEXT: .LBB59_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, s72 :: v_dual_mov_b32 v20, s63
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s73 :: v_dual_mov_b32 v22, s62
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s61 :: v_dual_mov_b32 v14, s60
@@ -40077,7 +38921,7 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v50, s9 :: v_dual_mov_b32 v51, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s6 :: v_dual_mov_b32 v53, s8
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v54, s4 :: v_dual_mov_b32 v55, s5
-; GFX11-FAKE16-NEXT: .LBB59_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB59_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v64, 0xffff, v1
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
index eba8927f1bbb3..f214f78e45eff 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
@@ -198,7 +198,7 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s40, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s41, v0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -256,8 +256,6 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v24, s7
; SI-NEXT: v_mov_b32_e32 v25, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v26i32_to_v26f32_scalar:
; VI: ; %bb.0:
@@ -276,7 +274,7 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s40, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s41, v0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -334,8 +332,6 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v24, s7
; VI-NEXT: v_mov_b32_e32 v25, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v26i32_to_v26f32_scalar:
; GFX9: ; %bb.0:
@@ -354,7 +350,7 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s40, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s41, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -412,8 +408,6 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v24, s7
; GFX9-NEXT: v_mov_b32_e32 v25, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v26i32_to_v26f32_scalar:
; GFX11: ; %bb.0:
@@ -429,7 +423,7 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s11, v0
; GFX11-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-NEXT: s_mov_b32 s12, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -476,8 +470,6 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v22, s7 :: v_dual_mov_b32 v23, s6
; GFX11-NEXT: v_dual_mov_b32 v24, s5 :: v_dual_mov_b32 v25, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -704,9 +696,9 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v25, s61, 1.0
; SI-NEXT: v_add_f32_e64 v24, s60, 1.0
@@ -734,10 +726,8 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB3_5
+; SI-NEXT: s_branch .LBB3_4
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -770,7 +760,7 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB3_5: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -835,9 +825,9 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v25, s61, 1.0
; VI-NEXT: v_add_f32_e64 v24, s60, 1.0
@@ -865,10 +855,8 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB3_5
+; VI-NEXT: s_branch .LBB3_4
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -901,7 +889,7 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB3_5: ; %end
+; VI-NEXT: .LBB3_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -966,9 +954,9 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v25, s61, 1.0
; GFX9-NEXT: v_add_f32_e64 v24, s60, 1.0
@@ -996,10 +984,8 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB3_5
+; GFX9-NEXT: s_branch .LBB3_4
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -1032,7 +1018,7 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB3_5: ; %end
+; GFX9-NEXT: .LBB3_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -1098,10 +1084,10 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v25, s61, 1.0
; GFX11-NEXT: v_add_f32_e64 v24, s60, 1.0
@@ -1129,10 +1115,8 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB3_5
+; GFX11-NEXT: s_branch .LBB3_4
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -1149,7 +1133,7 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB3_5: ; %end
+; GFX11-NEXT: .LBB3_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -1376,7 +1360,7 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s40, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s41, v0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB5_3
; SI-NEXT: .LBB5_2: ; %cmp.true
@@ -1434,8 +1418,6 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v24, s7
; SI-NEXT: v_mov_b32_e32 v25, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v26i32_to_v13i64_scalar:
; VI: ; %bb.0:
@@ -1454,7 +1436,7 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s40, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s41, v0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -1512,8 +1494,6 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v24, s7
; VI-NEXT: v_mov_b32_e32 v25, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v26i32_to_v13i64_scalar:
; GFX9: ; %bb.0:
@@ -1532,7 +1512,7 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s40, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s41, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -1590,8 +1570,6 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v24, s7
; GFX9-NEXT: v_mov_b32_e32 v25, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v26i32_to_v13i64_scalar:
; GFX11: ; %bb.0:
@@ -1607,7 +1585,7 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s11, v0
; GFX11-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-NEXT: s_mov_b32 s12, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -1654,8 +1632,6 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v22, s7 :: v_dual_mov_b32 v23, s6
; GFX11-NEXT: v_dual_mov_b32 v24, s5 :: v_dual_mov_b32 v25, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1872,7 +1848,7 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s40, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s41, v0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB7_3
; SI-NEXT: .LBB7_2: ; %cmp.true
@@ -1930,8 +1906,6 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v24, s7
; SI-NEXT: v_mov_b32_e32 v25, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v13i64_to_v26i32_scalar:
; VI: ; %bb.0:
@@ -1950,7 +1924,7 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s40, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s41, v0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -2008,8 +1982,6 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v24, s7
; VI-NEXT: v_mov_b32_e32 v25, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v13i64_to_v26i32_scalar:
; GFX9: ; %bb.0:
@@ -2028,7 +2000,7 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s40, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s41, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
@@ -2086,8 +2058,6 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v24, s7
; GFX9-NEXT: v_mov_b32_e32 v25, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v13i64_to_v26i32_scalar:
; GFX11: ; %bb.0:
@@ -2103,7 +2073,7 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s11, v0
; GFX11-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-NEXT: s_mov_b32 s12, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
@@ -2150,8 +2120,6 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v22, s7 :: v_dual_mov_b32 v23, s6
; GFX11-NEXT: v_dual_mov_b32 v24, s5 :: v_dual_mov_b32 v25, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2361,7 +2329,7 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s40, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s41, v0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB9_3
; SI-NEXT: .LBB9_2: ; %cmp.true
@@ -2419,8 +2387,6 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v24, s7
; SI-NEXT: v_mov_b32_e32 v25, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v26i32_to_v13f64_scalar:
; VI: ; %bb.0:
@@ -2439,7 +2405,7 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s40, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s41, v0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -2497,8 +2463,6 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v24, s7
; VI-NEXT: v_mov_b32_e32 v25, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v26i32_to_v13f64_scalar:
; GFX9: ; %bb.0:
@@ -2517,7 +2481,7 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s40, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s41, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -2575,8 +2539,6 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v24, s7
; GFX9-NEXT: v_mov_b32_e32 v25, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v26i32_to_v13f64_scalar:
; GFX11: ; %bb.0:
@@ -2592,7 +2554,7 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s11, v0
; GFX11-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-NEXT: s_mov_b32 s12, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -2639,8 +2601,6 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v22, s7 :: v_dual_mov_b32 v23, s6
; GFX11-NEXT: v_dual_mov_b32 v24, s5 :: v_dual_mov_b32 v25, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2828,9 +2788,9 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
; SI-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
@@ -2845,10 +2805,8 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; SI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; SI-NEXT: s_branch .LBB11_5
+; SI-NEXT: s_branch .LBB11_4
; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -2881,7 +2839,7 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB11_5: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -2946,9 +2904,9 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
; VI-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
@@ -2963,10 +2921,8 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB11_5
+; VI-NEXT: s_branch .LBB11_4
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -2999,7 +2955,7 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB11_5: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -3064,9 +3020,9 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
; GFX9-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
@@ -3081,10 +3037,8 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB11_5
+; GFX9-NEXT: s_branch .LBB11_4
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -3117,7 +3071,7 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB11_5: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -3183,10 +3137,10 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
; GFX11-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
@@ -3201,10 +3155,8 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB11_5
+; GFX11-NEXT: s_branch .LBB11_4
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -3221,7 +3173,7 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB11_5: ; %end
+; GFX11-NEXT: .LBB11_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -4045,7 +3997,7 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; SI-NEXT: s_cmp_lg_u32 s14, 0
; SI-NEXT: v_readfirstlane_b32 s14, v0
; SI-NEXT: v_writelane_b32 v26, s48, 8
-; SI-NEXT: s_cbranch_scc0 .LBB13_4
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s92, s5, 16
; SI-NEXT: s_lshr_b32 s93, s7, 16
@@ -4246,34 +4198,6 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB13_4:
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: s_branch .LBB13_2
;
; VI-LABEL: bitcast_v26i32_to_v52i16_scalar:
; VI: ; %bb.0:
@@ -4292,7 +4216,7 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s40, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s41, v0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s42, s6, 16
; VI-NEXT: s_lshr_b32 s43, s7, 16
@@ -4480,34 +4404,6 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v24, s7
; VI-NEXT: v_mov_b32_e32 v25, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v26i32_to_v52i16_scalar:
; GFX9: ; %bb.0:
@@ -4526,7 +4422,7 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s40, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s41, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s42, s6, 16
; GFX9-NEXT: s_lshr_b32 s43, s7, 16
@@ -4662,34 +4558,6 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v24, s7
; GFX9-NEXT: v_mov_b32_e32 v25, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v26i32_to_v52i16_scalar:
; GFX11: ; %bb.0:
@@ -4705,7 +4573,7 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s11, v0
; GFX11-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-NEXT: s_mov_b32 s78, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s12, s4, 16
; GFX11-NEXT: s_lshr_b32 s13, s5, 16
@@ -4830,34 +4698,6 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v22, s7 :: v_dual_mov_b32 v23, s6
; GFX11-NEXT: v_dual_mov_b32 v24, s5 :: v_dual_mov_b32 v25, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: ; implicit-def: $sgpr77
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr75
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6100,7 +5940,7 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s34, s35, 16
; SI-NEXT: v_readfirstlane_b32 s4, v12
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s83, 16
@@ -6372,9 +6212,6 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v52i16_to_v26i32_scalar:
; VI: ; %bb.0:
@@ -6454,7 +6291,7 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s70, s71, 16
; VI-NEXT: v_readfirstlane_b32 s4, v12
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s87, 16
@@ -6730,9 +6567,6 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v52i16_to_v26i32_scalar:
; GFX9: ; %bb.0:
@@ -6818,9 +6652,9 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s59, s59, s72
; GFX9-NEXT: s_pack_ll_b32_b16 s60, s60, s63
; GFX9-NEXT: s_pack_ll_b32_b16 s61, s61, s62
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -6848,10 +6682,8 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v23, s59, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v24, s60, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v25, s61, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB15_5
+; GFX9-NEXT: s_branch .LBB15_4
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -6884,7 +6716,7 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB15_5: ; %end
+; GFX9-NEXT: .LBB15_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -6969,10 +6801,10 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s23, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s24, s47, s60
; GFX11-NEXT: s_pack_ll_b32_b16 s25, s46, s59
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -7002,8 +6834,6 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v25, s25, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -7827,7 +7657,7 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; SI-NEXT: s_cmp_lg_u32 s14, 0
; SI-NEXT: v_readfirstlane_b32 s14, v0
; SI-NEXT: v_writelane_b32 v26, s48, 8
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s92, s5, 16
; SI-NEXT: s_lshr_b32 s93, s7, 16
@@ -8028,34 +7858,6 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_v26i32_to_v52f16_scalar:
; VI: ; %bb.0:
@@ -8074,7 +7876,7 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s40, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s41, v0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s42, s6, 16
; VI-NEXT: s_lshr_b32 s43, s7, 16
@@ -8262,34 +8064,6 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v24, s7
; VI-NEXT: v_mov_b32_e32 v25, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v26i32_to_v52f16_scalar:
; GFX9: ; %bb.0:
@@ -8308,7 +8082,7 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s40, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s41, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s42, s6, 16
; GFX9-NEXT: s_lshr_b32 s43, s7, 16
@@ -8444,34 +8218,6 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v24, s7
; GFX9-NEXT: v_mov_b32_e32 v25, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v26i32_to_v52f16_scalar:
; GFX11: ; %bb.0:
@@ -8487,7 +8233,7 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s11, v0
; GFX11-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-NEXT: s_mov_b32 s78, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s12, s4, 16
; GFX11-NEXT: s_lshr_b32 s13, s5, 16
@@ -8612,34 +8358,6 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v22, s7 :: v_dual_mov_b32 v23, s6
; GFX11-NEXT: v_dual_mov_b32 v24, s5 :: v_dual_mov_b32 v25, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: ; implicit-def: $sgpr77
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr75
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10003,7 +9721,7 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s95, s94, 16
; SI-NEXT: v_readfirstlane_b32 s4, v12
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s83, 16
@@ -10083,7 +9801,7 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s61, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s83
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -10293,11 +10011,8 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v24, v25, v24
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v26
; SI-NEXT: v_or_b32_e32 v25, v27, v25
-; SI-NEXT: s_branch .LBB19_5
+; SI-NEXT: s_branch .LBB19_4
; SI-NEXT: .LBB19_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -10330,7 +10045,7 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB19_5: ; %end
+; SI-NEXT: .LBB19_4: ; %end
; SI-NEXT: v_readlane_b32 s83, v32, 27
; SI-NEXT: v_readlane_b32 s82, v32, 26
; SI-NEXT: v_readlane_b32 s81, v32, 25
@@ -10443,7 +10158,7 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s80, s71, 16
; VI-NEXT: v_readfirstlane_b32 s4, v12
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s87, 16
@@ -10523,7 +10238,7 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s61, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB19_4
+; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v25, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s87
@@ -10630,11 +10345,8 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; VI-NEXT: v_add_f16_sdwa v26, v26, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v25, s6, v25
; VI-NEXT: v_or_b32_e32 v25, v25, v26
-; VI-NEXT: s_branch .LBB19_5
+; VI-NEXT: s_branch .LBB19_4
; VI-NEXT: .LBB19_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB19_2
-; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -10667,7 +10379,7 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB19_5: ; %end
+; VI-NEXT: .LBB19_4: ; %end
; VI-NEXT: v_readlane_b32 s87, v32, 31
; VI-NEXT: v_readlane_b32 s86, v32, 30
; VI-NEXT: v_readlane_b32 s85, v32, 29
@@ -10790,9 +10502,9 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s59, s59, s72
; GFX9-NEXT: s_pack_ll_b32_b16 s60, s60, s63
; GFX9-NEXT: s_pack_ll_b32_b16 s61, s61, s62
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v25, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v25 op_sel_hi:[1,0]
@@ -10821,10 +10533,8 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v23, s59, v25 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v24, s60, v25 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v25, s61, v25 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB19_5
+; GFX9-NEXT: s_branch .LBB19_4
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -10857,7 +10567,7 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB19_5: ; %end
+; GFX9-NEXT: .LBB19_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -10942,10 +10652,10 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s23, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s24, s47, s60
; GFX11-NEXT: s_pack_ll_b32_b16 s25, s46, s59
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -10975,8 +10685,6 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v25, 0x200, s25 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -11220,9 +10928,9 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB21_3
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_4
+; SI-NEXT: s_cbranch_execnz .LBB21_3
; SI-NEXT: .LBB21_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v25, s61, 1.0
; SI-NEXT: v_add_f32_e64 v24, s60, 1.0
@@ -11250,10 +10958,8 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB21_5
+; SI-NEXT: s_branch .LBB21_4
; SI-NEXT: .LBB21_3:
-; SI-NEXT: s_branch .LBB21_2
-; SI-NEXT: .LBB21_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -11286,7 +10992,7 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB21_5: ; %end
+; SI-NEXT: .LBB21_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -11351,9 +11057,9 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB21_3
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_4
+; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v25, s61, 1.0
; VI-NEXT: v_add_f32_e64 v24, s60, 1.0
@@ -11381,10 +11087,8 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB21_5
+; VI-NEXT: s_branch .LBB21_4
; VI-NEXT: .LBB21_3:
-; VI-NEXT: s_branch .LBB21_2
-; VI-NEXT: .LBB21_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -11417,7 +11121,7 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB21_5: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -11482,9 +11186,9 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
+; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v25, s61, 1.0
; GFX9-NEXT: v_add_f32_e64 v24, s60, 1.0
@@ -11512,10 +11216,8 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB21_5
+; GFX9-NEXT: s_branch .LBB21_4
; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
-; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -11548,7 +11250,7 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB21_5: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -11614,10 +11316,10 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
; GFX11-NEXT: .LBB21_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v25, s61, 1.0
; GFX11-NEXT: v_add_f32_e64 v24, s60, 1.0
@@ -11645,10 +11347,8 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB21_5
+; GFX11-NEXT: s_branch .LBB21_4
; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
-; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -11665,7 +11365,7 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB21_5: ; %end
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -11899,7 +11599,7 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s40, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s41, v0
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB23_3
; SI-NEXT: .LBB23_2: ; %cmp.true
@@ -11957,8 +11657,6 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v24, s7
; SI-NEXT: v_mov_b32_e32 v25, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v13i64_to_v26f32_scalar:
; VI: ; %bb.0:
@@ -11977,7 +11675,7 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s40, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s41, v0
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
@@ -12035,8 +11733,6 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v24, s7
; VI-NEXT: v_mov_b32_e32 v25, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v13i64_to_v26f32_scalar:
; GFX9: ; %bb.0:
@@ -12055,7 +11751,7 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s40, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s41, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
@@ -12113,8 +11809,6 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v24, s7
; GFX9-NEXT: v_mov_b32_e32 v25, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v13i64_to_v26f32_scalar:
; GFX11: ; %bb.0:
@@ -12130,7 +11824,7 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s11, v0
; GFX11-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-NEXT: s_mov_b32 s12, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
@@ -12177,8 +11871,6 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v22, s7 :: v_dual_mov_b32 v23, s6
; GFX11-NEXT: v_dual_mov_b32 v24, s5 :: v_dual_mov_b32 v25, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: s_branch .LBB23_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12405,9 +12097,9 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB25_3
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_4
+; SI-NEXT: s_cbranch_execnz .LBB25_3
; SI-NEXT: .LBB25_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v25, s61, 1.0
; SI-NEXT: v_add_f32_e64 v24, s60, 1.0
@@ -12435,10 +12127,8 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB25_5
+; SI-NEXT: s_branch .LBB25_4
; SI-NEXT: .LBB25_3:
-; SI-NEXT: s_branch .LBB25_2
-; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -12471,7 +12161,7 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB25_5: ; %end
+; SI-NEXT: .LBB25_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -12536,9 +12226,9 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
+; VI-NEXT: s_cbranch_execnz .LBB25_3
; VI-NEXT: .LBB25_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v25, s61, 1.0
; VI-NEXT: v_add_f32_e64 v24, s60, 1.0
@@ -12566,10 +12256,8 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB25_5
+; VI-NEXT: s_branch .LBB25_4
; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
-; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -12602,7 +12290,7 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB25_5: ; %end
+; VI-NEXT: .LBB25_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -12667,9 +12355,9 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
+; GFX9-NEXT: s_cbranch_execnz .LBB25_3
; GFX9-NEXT: .LBB25_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v25, s61, 1.0
; GFX9-NEXT: v_add_f32_e64 v24, s60, 1.0
@@ -12697,10 +12385,8 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB25_5
+; GFX9-NEXT: s_branch .LBB25_4
; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
-; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -12733,7 +12419,7 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB25_5: ; %end
+; GFX9-NEXT: .LBB25_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -12799,10 +12485,10 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_3
; GFX11-NEXT: .LBB25_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v25, s61, 1.0
; GFX11-NEXT: v_add_f32_e64 v24, s60, 1.0
@@ -12830,10 +12516,8 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB25_5
+; GFX11-NEXT: s_branch .LBB25_4
; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
-; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -12850,7 +12534,7 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB25_5: ; %end
+; GFX11-NEXT: .LBB25_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -13055,9 +12739,9 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB27_3
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_4
+; SI-NEXT: s_cbranch_execnz .LBB27_3
; SI-NEXT: .LBB27_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
; SI-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
@@ -13072,10 +12756,8 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; SI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; SI-NEXT: s_branch .LBB27_5
+; SI-NEXT: s_branch .LBB27_4
; SI-NEXT: .LBB27_3:
-; SI-NEXT: s_branch .LBB27_2
-; SI-NEXT: .LBB27_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -13108,7 +12790,7 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB27_5: ; %end
+; SI-NEXT: .LBB27_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -13173,9 +12855,9 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB27_3
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_4
+; VI-NEXT: s_cbranch_execnz .LBB27_3
; VI-NEXT: .LBB27_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
; VI-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
@@ -13190,10 +12872,8 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; VI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB27_5
+; VI-NEXT: s_branch .LBB27_4
; VI-NEXT: .LBB27_3:
-; VI-NEXT: s_branch .LBB27_2
-; VI-NEXT: .LBB27_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -13226,7 +12906,7 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB27_5: ; %end
+; VI-NEXT: .LBB27_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -13291,9 +12971,9 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_4
+; GFX9-NEXT: s_cbranch_execnz .LBB27_3
; GFX9-NEXT: .LBB27_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
; GFX9-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
@@ -13308,10 +12988,8 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; GFX9-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB27_5
+; GFX9-NEXT: s_branch .LBB27_4
; GFX9-NEXT: .LBB27_3:
-; GFX9-NEXT: s_branch .LBB27_2
-; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -13344,7 +13022,7 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB27_5: ; %end
+; GFX9-NEXT: .LBB27_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -13410,10 +13088,10 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_3
; GFX11-NEXT: .LBB27_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
; GFX11-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
@@ -13428,10 +13106,8 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; GFX11-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB27_5
+; GFX11-NEXT: s_branch .LBB27_4
; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
-; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -13448,7 +13124,7 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB27_5: ; %end
+; GFX11-NEXT: .LBB27_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -14251,7 +13927,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: v_writelane_b32 v45, s48, 8
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s48, s5, 16
; SI-NEXT: s_lshr_b32 s39, s7, 16
@@ -14279,7 +13955,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[78:79], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[88:89], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB29_4
+; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v25, s5, 1.0
; SI-NEXT: v_add_f32_e64 v24, s4, 1.0
@@ -14338,36 +14014,8 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v1
-; SI-NEXT: s_branch .LBB29_5
+; SI-NEXT: s_branch .LBB29_4
; SI-NEXT: .LBB29_3:
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: s_branch .LBB29_2
-; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -14421,7 +14069,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v36, s76
; SI-NEXT: v_mov_b32_e32 v37, s78
; SI-NEXT: v_mov_b32_e32 v38, s88
-; SI-NEXT: .LBB29_5: ; %end
+; SI-NEXT: .LBB29_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v38
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37
@@ -14541,7 +14189,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 ; 4-byte Folded Spill
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s42, s6, 16
; VI-NEXT: s_lshr_b32 s43, s7, 16
@@ -14569,7 +14217,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; VI-NEXT: s_lshr_b32 s89, s18, 16
; VI-NEXT: s_lshr_b32 s90, s17, 16
; VI-NEXT: s_lshr_b32 s91, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB29_4
+; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v25, s6, 1.0
; VI-NEXT: v_add_f32_e64 v24, s7, 1.0
@@ -14623,36 +14271,8 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v41, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v42, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v43, 16, v0
-; VI-NEXT: s_branch .LBB29_5
+; VI-NEXT: s_branch .LBB29_4
; VI-NEXT: .LBB29_3:
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: s_branch .LBB29_2
-; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -14705,7 +14325,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v28, s44
; VI-NEXT: v_mov_b32_e32 v27, s43
; VI-NEXT: v_mov_b32_e32 v26, s42
-; VI-NEXT: .LBB29_5: ; %end
+; VI-NEXT: .LBB29_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v43, 16, v43
; VI-NEXT: v_lshlrev_b32_e32 v42, 16, v42
; VI-NEXT: v_lshlrev_b32_e32 v41, 16, v41
@@ -14786,7 +14406,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s42, s6, 16
; GFX9-NEXT: s_lshr_b32 s43, s7, 16
@@ -14814,7 +14434,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: s_lshr_b32 s89, s18, 16
; GFX9-NEXT: s_lshr_b32 s90, s17, 16
; GFX9-NEXT: s_lshr_b32 s91, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
+; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v25, s6, 1.0
; GFX9-NEXT: v_add_f32_e64 v24, s7, 1.0
@@ -14868,36 +14488,8 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v0
-; GFX9-NEXT: s_branch .LBB29_5
+; GFX9-NEXT: s_branch .LBB29_4
; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: s_branch .LBB29_2
-; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -14950,7 +14542,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v28, s44
; GFX9-NEXT: v_mov_b32_e32 v27, s43
; GFX9-NEXT: v_mov_b32_e32 v26, s42
-; GFX9-NEXT: .LBB29_5: ; %end
+; GFX9-NEXT: .LBB29_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -15024,7 +14616,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s11, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s12, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s4, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s5, 16
@@ -15053,7 +14645,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s77, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s78, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f32_e64 v25, s4, 1.0
; GFX11-TRUE16-NEXT: v_add_f32_e64 v24, s5, 1.0
@@ -15107,36 +14699,8 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB29_5
+; GFX11-TRUE16-NEXT: s_branch .LBB29_4
; GFX11-TRUE16-NEXT: .LBB29_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: s_branch .LBB29_2
-; GFX11-TRUE16-NEXT: .LBB29_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -15163,7 +14727,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
-; GFX11-TRUE16-NEXT: .LBB29_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB29_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
@@ -15220,7 +14784,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s11, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s12, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s4, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s5, 16
@@ -15249,7 +14813,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s77, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s78, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f32_e64 v21, s4, 1.0
; GFX11-FAKE16-NEXT: v_add_f32_e64 v22, s5, 1.0
@@ -15303,36 +14867,8 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v3
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v4
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v5
-; GFX11-FAKE16-NEXT: s_branch .LBB29_5
+; GFX11-FAKE16-NEXT: s_branch .LBB29_4
; GFX11-FAKE16-NEXT: .LBB29_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: s_branch .LBB29_2
-; GFX11-FAKE16-NEXT: .LBB29_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s0 :: v_dual_mov_b32 v4, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v2, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s16 :: v_dual_mov_b32 v1, s17
@@ -15359,7 +14895,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
-; GFX11-FAKE16-NEXT: .LBB29_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB29_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v69, 0xffff, v0
@@ -16655,7 +16191,7 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; SI-NEXT: s_lshr_b32 s34, s35, 16
; SI-NEXT: v_readfirstlane_b32 s4, v12
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s83, 16
@@ -16927,9 +16463,6 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v52i16_to_v26f32_scalar:
; VI: ; %bb.0:
@@ -17009,7 +16542,7 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s70, s71, 16
; VI-NEXT: v_readfirstlane_b32 s4, v12
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s87, 16
@@ -17285,9 +16818,6 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v52i16_to_v26f32_scalar:
; GFX9: ; %bb.0:
@@ -17373,9 +16903,9 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s59, s59, s72
; GFX9-NEXT: s_pack_ll_b32_b16 s60, s60, s63
; GFX9-NEXT: s_pack_ll_b32_b16 s61, s61, s62
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
+; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -17403,10 +16933,8 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v23, s59, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v24, s60, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v25, s61, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB31_5
+; GFX9-NEXT: s_branch .LBB31_4
; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
-; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -17439,7 +16967,7 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB31_5: ; %end
+; GFX9-NEXT: .LBB31_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -17524,10 +17052,10 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s23, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s24, s47, s60
; GFX11-NEXT: s_pack_ll_b32_b16 s25, s46, s59
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-NEXT: .LBB31_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -17557,8 +17085,6 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v25, s25, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
-; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -18361,7 +17887,7 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: v_writelane_b32 v45, s48, 8
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s48, s5, 16
; SI-NEXT: s_lshr_b32 s39, s7, 16
@@ -18389,7 +17915,7 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[78:79], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[88:89], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB33_4
+; SI-NEXT: s_cbranch_execnz .LBB33_3
; SI-NEXT: .LBB33_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v25, s5, 1.0
; SI-NEXT: v_add_f32_e64 v24, s4, 1.0
@@ -18448,36 +17974,8 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v1
-; SI-NEXT: s_branch .LBB33_5
+; SI-NEXT: s_branch .LBB33_4
; SI-NEXT: .LBB33_3:
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: s_branch .LBB33_2
-; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -18531,7 +18029,7 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v36, s76
; SI-NEXT: v_mov_b32_e32 v37, s78
; SI-NEXT: v_mov_b32_e32 v38, s88
-; SI-NEXT: .LBB33_5: ; %end
+; SI-NEXT: .LBB33_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v38
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37
@@ -18651,7 +18149,7 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 ; 4-byte Folded Spill
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s42, s6, 16
; VI-NEXT: s_lshr_b32 s43, s7, 16
@@ -18679,7 +18177,7 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; VI-NEXT: s_lshr_b32 s89, s18, 16
; VI-NEXT: s_lshr_b32 s90, s17, 16
; VI-NEXT: s_lshr_b32 s91, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB33_4
+; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v25, s6, 1.0
; VI-NEXT: v_add_f32_e64 v24, s7, 1.0
@@ -18733,36 +18231,8 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v41, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v42, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v43, 16, v0
-; VI-NEXT: s_branch .LBB33_5
+; VI-NEXT: s_branch .LBB33_4
; VI-NEXT: .LBB33_3:
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: s_branch .LBB33_2
-; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -18815,7 +18285,7 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v28, s44
; VI-NEXT: v_mov_b32_e32 v27, s43
; VI-NEXT: v_mov_b32_e32 v26, s42
-; VI-NEXT: .LBB33_5: ; %end
+; VI-NEXT: .LBB33_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v43, 16, v43
; VI-NEXT: v_lshlrev_b32_e32 v42, 16, v42
; VI-NEXT: v_lshlrev_b32_e32 v41, 16, v41
@@ -18896,7 +18366,7 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s42, s6, 16
; GFX9-NEXT: s_lshr_b32 s43, s7, 16
@@ -18924,7 +18394,7 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: s_lshr_b32 s89, s18, 16
; GFX9-NEXT: s_lshr_b32 s90, s17, 16
; GFX9-NEXT: s_lshr_b32 s91, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
+; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v25, s6, 1.0
; GFX9-NEXT: v_add_f32_e64 v24, s7, 1.0
@@ -18978,36 +18448,8 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v0
-; GFX9-NEXT: s_branch .LBB33_5
+; GFX9-NEXT: s_branch .LBB33_4
; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: s_branch .LBB33_2
-; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -19060,7 +18502,7 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v28, s44
; GFX9-NEXT: v_mov_b32_e32 v27, s43
; GFX9-NEXT: v_mov_b32_e32 v26, s42
-; GFX9-NEXT: .LBB33_5: ; %end
+; GFX9-NEXT: .LBB33_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -19134,7 +18576,7 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s11, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s12, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s4, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s5, 16
@@ -19163,7 +18605,7 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s77, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s78, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f32_e64 v25, s4, 1.0
; GFX11-TRUE16-NEXT: v_add_f32_e64 v24, s5, 1.0
@@ -19217,36 +18659,8 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB33_5
+; GFX11-TRUE16-NEXT: s_branch .LBB33_4
; GFX11-TRUE16-NEXT: .LBB33_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: s_branch .LBB33_2
-; GFX11-TRUE16-NEXT: .LBB33_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -19273,7 +18687,7 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
-; GFX11-TRUE16-NEXT: .LBB33_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB33_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
@@ -19330,7 +18744,7 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s11, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s12, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s4, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s5, 16
@@ -19359,7 +18773,7 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s77, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s78, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f32_e64 v21, s4, 1.0
; GFX11-FAKE16-NEXT: v_add_f32_e64 v22, s5, 1.0
@@ -19413,36 +18827,8 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v3
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v4
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v5
-; GFX11-FAKE16-NEXT: s_branch .LBB33_5
+; GFX11-FAKE16-NEXT: s_branch .LBB33_4
; GFX11-FAKE16-NEXT: .LBB33_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: s_branch .LBB33_2
-; GFX11-FAKE16-NEXT: .LBB33_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s0 :: v_dual_mov_b32 v4, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v2, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s16 :: v_dual_mov_b32 v1, s17
@@ -19469,7 +18855,7 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
-; GFX11-FAKE16-NEXT: .LBB33_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB33_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v69, 0xffff, v0
@@ -20886,7 +20272,7 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: s_lshr_b32 s95, s94, 16
; SI-NEXT: v_readfirstlane_b32 s4, v12
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB35_3
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s83, 16
@@ -20966,7 +20352,7 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s61, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB35_4
+; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s83
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -21176,11 +20562,8 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v24, v25, v24
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v26
; SI-NEXT: v_or_b32_e32 v25, v27, v25
-; SI-NEXT: s_branch .LBB35_5
+; SI-NEXT: s_branch .LBB35_4
; SI-NEXT: .LBB35_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB35_2
-; SI-NEXT: .LBB35_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -21213,7 +20596,7 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB35_5: ; %end
+; SI-NEXT: .LBB35_4: ; %end
; SI-NEXT: v_readlane_b32 s83, v32, 27
; SI-NEXT: v_readlane_b32 s82, v32, 26
; SI-NEXT: v_readlane_b32 s81, v32, 25
@@ -21326,7 +20709,7 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; VI-NEXT: s_lshr_b32 s80, s71, 16
; VI-NEXT: v_readfirstlane_b32 s4, v12
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s87, 16
@@ -21406,7 +20789,7 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s61, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB35_4
+; VI-NEXT: s_cbranch_execnz .LBB35_3
; VI-NEXT: .LBB35_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v25, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s87
@@ -21513,11 +20896,8 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; VI-NEXT: v_add_f16_sdwa v26, v26, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v25, s6, v25
; VI-NEXT: v_or_b32_e32 v25, v25, v26
-; VI-NEXT: s_branch .LBB35_5
+; VI-NEXT: s_branch .LBB35_4
; VI-NEXT: .LBB35_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB35_2
-; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -21550,7 +20930,7 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB35_5: ; %end
+; VI-NEXT: .LBB35_4: ; %end
; VI-NEXT: v_readlane_b32 s87, v32, 31
; VI-NEXT: v_readlane_b32 s86, v32, 30
; VI-NEXT: v_readlane_b32 s85, v32, 29
@@ -21673,9 +21053,9 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s59, s59, s72
; GFX9-NEXT: s_pack_ll_b32_b16 s60, s60, s63
; GFX9-NEXT: s_pack_ll_b32_b16 s61, s61, s62
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
+; GFX9-NEXT: s_cbranch_execnz .LBB35_3
; GFX9-NEXT: .LBB35_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v25, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v25 op_sel_hi:[1,0]
@@ -21704,10 +21084,8 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v23, s59, v25 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v24, s60, v25 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v25, s61, v25 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB35_5
+; GFX9-NEXT: s_branch .LBB35_4
; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
-; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -21740,7 +21118,7 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB35_5: ; %end
+; GFX9-NEXT: .LBB35_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -21825,10 +21203,10 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s23, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s24, s47, s60
; GFX11-NEXT: s_pack_ll_b32_b16 s25, s46, s59
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-NEXT: .LBB35_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -21858,8 +21236,6 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v25, 0x200, s25 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
-; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -22093,7 +21469,7 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s40, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s41, v0
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB37_3
; SI-NEXT: .LBB37_2: ; %cmp.true
@@ -22151,8 +21527,6 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v24, s7
; SI-NEXT: v_mov_b32_e32 v25, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v13i64_to_v13f64_scalar:
; VI: ; %bb.0:
@@ -22171,7 +21545,7 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s40, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s41, v0
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB37_3
; VI-NEXT: .LBB37_2: ; %cmp.true
@@ -22229,8 +21603,6 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v24, s7
; VI-NEXT: v_mov_b32_e32 v25, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v13i64_to_v13f64_scalar:
; GFX9: ; %bb.0:
@@ -22249,7 +21621,7 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s40, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s41, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB37_3
; GFX9-NEXT: .LBB37_2: ; %cmp.true
@@ -22307,8 +21679,6 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v24, s7
; GFX9-NEXT: v_mov_b32_e32 v25, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v13i64_to_v13f64_scalar:
; GFX11: ; %bb.0:
@@ -22324,7 +21694,7 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s11, v0
; GFX11-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-NEXT: s_mov_b32 s12, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
@@ -22370,8 +21740,6 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v22, s7 :: v_dual_mov_b32 v23, s6
; GFX11-NEXT: v_dual_mov_b32 v24, s5 :: v_dual_mov_b32 v25, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_4:
-; GFX11-NEXT: s_branch .LBB37_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -22559,9 +21927,9 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB39_3
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_4
+; SI-NEXT: s_cbranch_execnz .LBB39_3
; SI-NEXT: .LBB39_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -22576,10 +21944,8 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; SI-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
; SI-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
; SI-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
-; SI-NEXT: s_branch .LBB39_5
+; SI-NEXT: s_branch .LBB39_4
; SI-NEXT: .LBB39_3:
-; SI-NEXT: s_branch .LBB39_2
-; SI-NEXT: .LBB39_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -22612,7 +21978,7 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB39_5: ; %end
+; SI-NEXT: .LBB39_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -22677,9 +22043,9 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB39_3
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_4
+; VI-NEXT: s_cbranch_execnz .LBB39_3
; VI-NEXT: .LBB39_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -22694,10 +22060,8 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; VI-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
; VI-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
; VI-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
-; VI-NEXT: s_branch .LBB39_5
+; VI-NEXT: s_branch .LBB39_4
; VI-NEXT: .LBB39_3:
-; VI-NEXT: s_branch .LBB39_2
-; VI-NEXT: .LBB39_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -22730,7 +22094,7 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB39_5: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -22795,9 +22159,9 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
+; GFX9-NEXT: s_cbranch_execnz .LBB39_3
; GFX9-NEXT: .LBB39_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -22812,10 +22176,8 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
; GFX9-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
; GFX9-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
-; GFX9-NEXT: s_branch .LBB39_5
+; GFX9-NEXT: s_branch .LBB39_4
; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
-; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -22848,7 +22210,7 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB39_5: ; %end
+; GFX9-NEXT: .LBB39_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -22914,10 +22276,10 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_3
; GFX11-NEXT: .LBB39_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -22932,10 +22294,8 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[20:21], s[56:57], 1.0
; GFX11-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
; GFX11-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
-; GFX11-NEXT: s_branch .LBB39_5
+; GFX11-NEXT: s_branch .LBB39_4
; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
-; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -22952,7 +22312,7 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB39_5: ; %end
+; GFX11-NEXT: .LBB39_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -23790,7 +23150,7 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; SI-NEXT: s_cmp_lg_u32 s14, 0
; SI-NEXT: v_readfirstlane_b32 s14, v0
; SI-NEXT: v_writelane_b32 v26, s48, 8
-; SI-NEXT: s_cbranch_scc0 .LBB41_4
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s92, s5, 16
; SI-NEXT: s_lshr_b32 s93, s7, 16
@@ -23991,34 +23351,6 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB41_4:
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: s_branch .LBB41_2
;
; VI-LABEL: bitcast_v13i64_to_v52i16_scalar:
; VI: ; %bb.0:
@@ -24037,7 +23369,7 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s40, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s41, v0
-; VI-NEXT: s_cbranch_scc0 .LBB41_4
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s42, s6, 16
; VI-NEXT: s_lshr_b32 s43, s7, 16
@@ -24225,34 +23557,6 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v24, s7
; VI-NEXT: v_mov_b32_e32 v25, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_4:
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: s_branch .LBB41_2
;
; GFX9-LABEL: bitcast_v13i64_to_v52i16_scalar:
; GFX9: ; %bb.0:
@@ -24271,7 +23575,7 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s40, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s41, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s42, s6, 16
; GFX9-NEXT: s_lshr_b32 s43, s7, 16
@@ -24407,34 +23711,6 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v24, s7
; GFX9-NEXT: v_mov_b32_e32 v25, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_4:
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: s_branch .LBB41_2
;
; GFX11-LABEL: bitcast_v13i64_to_v52i16_scalar:
; GFX11: ; %bb.0:
@@ -24450,7 +23726,7 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s11, v0
; GFX11-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-NEXT: s_mov_b32 s78, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s12, s4, 16
; GFX11-NEXT: s_lshr_b32 s13, s5, 16
@@ -24575,34 +23851,6 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v22, s7 :: v_dual_mov_b32 v23, s6
; GFX11-NEXT: v_dual_mov_b32 v24, s5 :: v_dual_mov_b32 v25, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_4:
-; GFX11-NEXT: ; implicit-def: $sgpr77
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr75
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: s_branch .LBB41_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -25845,7 +25093,7 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s34, s35, 16
; SI-NEXT: v_readfirstlane_b32 s4, v12
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB43_4
+; SI-NEXT: s_cbranch_scc0 .LBB43_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s83, 16
@@ -26117,9 +25365,6 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB43_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB43_2
;
; VI-LABEL: bitcast_v52i16_to_v13i64_scalar:
; VI: ; %bb.0:
@@ -26199,7 +25444,7 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s70, s71, 16
; VI-NEXT: v_readfirstlane_b32 s4, v12
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB43_4
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s87, 16
@@ -26475,9 +25720,6 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB43_2
;
; GFX9-LABEL: bitcast_v52i16_to_v13i64_scalar:
; GFX9: ; %bb.0:
@@ -26563,9 +25805,9 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s59, s59, s72
; GFX9-NEXT: s_pack_ll_b32_b16 s60, s60, s63
; GFX9-NEXT: s_pack_ll_b32_b16 s61, s61, s62
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
+; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -26593,10 +25835,8 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v23, s59, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v24, s60, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v25, s61, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB43_5
+; GFX9-NEXT: s_branch .LBB43_4
; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
-; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -26629,7 +25869,7 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB43_5: ; %end
+; GFX9-NEXT: .LBB43_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -26714,10 +25954,10 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s23, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s24, s47, s60
; GFX11-NEXT: s_pack_ll_b32_b16 s25, s46, s59
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-NEXT: .LBB43_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -26747,8 +25987,6 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v25, s25, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
-; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -27586,7 +26824,7 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; SI-NEXT: s_cmp_lg_u32 s14, 0
; SI-NEXT: v_readfirstlane_b32 s14, v0
; SI-NEXT: v_writelane_b32 v26, s48, 8
-; SI-NEXT: s_cbranch_scc0 .LBB45_4
+; SI-NEXT: s_cbranch_scc0 .LBB45_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s92, s5, 16
; SI-NEXT: s_lshr_b32 s93, s7, 16
@@ -27787,34 +27025,6 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB45_4:
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: s_branch .LBB45_2
;
; VI-LABEL: bitcast_v13i64_to_v52f16_scalar:
; VI: ; %bb.0:
@@ -27833,7 +27043,7 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s40, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s41, v0
-; VI-NEXT: s_cbranch_scc0 .LBB45_4
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s42, s6, 16
; VI-NEXT: s_lshr_b32 s43, s7, 16
@@ -28021,34 +27231,6 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v24, s7
; VI-NEXT: v_mov_b32_e32 v25, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_4:
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: s_branch .LBB45_2
;
; GFX9-LABEL: bitcast_v13i64_to_v52f16_scalar:
; GFX9: ; %bb.0:
@@ -28067,7 +27249,7 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s40, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s41, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s42, s6, 16
; GFX9-NEXT: s_lshr_b32 s43, s7, 16
@@ -28203,34 +27385,6 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v24, s7
; GFX9-NEXT: v_mov_b32_e32 v25, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_4:
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: s_branch .LBB45_2
;
; GFX11-LABEL: bitcast_v13i64_to_v52f16_scalar:
; GFX11: ; %bb.0:
@@ -28246,7 +27400,7 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s11, v0
; GFX11-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-NEXT: s_mov_b32 s78, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s12, s4, 16
; GFX11-NEXT: s_lshr_b32 s13, s5, 16
@@ -28371,34 +27525,6 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v22, s7 :: v_dual_mov_b32 v23, s6
; GFX11-NEXT: v_dual_mov_b32 v24, s5 :: v_dual_mov_b32 v25, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_4:
-; GFX11-NEXT: ; implicit-def: $sgpr77
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr75
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: s_branch .LBB45_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -29762,7 +28888,7 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s95, s94, 16
; SI-NEXT: v_readfirstlane_b32 s4, v12
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB47_3
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s83, 16
@@ -29842,7 +28968,7 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s61, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB47_4
+; SI-NEXT: s_cbranch_execnz .LBB47_3
; SI-NEXT: .LBB47_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s83
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -30052,11 +29178,8 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v24, v25, v24
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v26
; SI-NEXT: v_or_b32_e32 v25, v27, v25
-; SI-NEXT: s_branch .LBB47_5
+; SI-NEXT: s_branch .LBB47_4
; SI-NEXT: .LBB47_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB47_2
-; SI-NEXT: .LBB47_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -30089,7 +29212,7 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB47_5: ; %end
+; SI-NEXT: .LBB47_4: ; %end
; SI-NEXT: v_readlane_b32 s83, v32, 27
; SI-NEXT: v_readlane_b32 s82, v32, 26
; SI-NEXT: v_readlane_b32 s81, v32, 25
@@ -30202,7 +29325,7 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s80, s71, 16
; VI-NEXT: v_readfirstlane_b32 s4, v12
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s87, 16
@@ -30282,7 +29405,7 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s61, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB47_4
+; VI-NEXT: s_cbranch_execnz .LBB47_3
; VI-NEXT: .LBB47_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v25, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s87
@@ -30389,11 +29512,8 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; VI-NEXT: v_add_f16_sdwa v26, v26, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v25, s6, v25
; VI-NEXT: v_or_b32_e32 v25, v25, v26
-; VI-NEXT: s_branch .LBB47_5
+; VI-NEXT: s_branch .LBB47_4
; VI-NEXT: .LBB47_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB47_2
-; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -30426,7 +29546,7 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB47_5: ; %end
+; VI-NEXT: .LBB47_4: ; %end
; VI-NEXT: v_readlane_b32 s87, v32, 31
; VI-NEXT: v_readlane_b32 s86, v32, 30
; VI-NEXT: v_readlane_b32 s85, v32, 29
@@ -30549,9 +29669,9 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s59, s59, s72
; GFX9-NEXT: s_pack_ll_b32_b16 s60, s60, s63
; GFX9-NEXT: s_pack_ll_b32_b16 s61, s61, s62
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
+; GFX9-NEXT: s_cbranch_execnz .LBB47_3
; GFX9-NEXT: .LBB47_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v25, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v25 op_sel_hi:[1,0]
@@ -30580,10 +29700,8 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v23, s59, v25 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v24, s60, v25 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v25, s61, v25 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB47_5
+; GFX9-NEXT: s_branch .LBB47_4
; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
-; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -30616,7 +29734,7 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB47_5: ; %end
+; GFX9-NEXT: .LBB47_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -30701,10 +29819,10 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s23, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s24, s47, s60
; GFX11-NEXT: s_pack_ll_b32_b16 s25, s46, s59
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-NEXT: .LBB47_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -30734,8 +29852,6 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v25, 0x200, s25 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -31499,7 +30615,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: v_writelane_b32 v45, s48, 8
-; SI-NEXT: s_cbranch_scc0 .LBB49_3
+; SI-NEXT: s_cbranch_scc0 .LBB49_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s48, s13, 16
; SI-NEXT: s_lshr_b32 s39, s15, 16
@@ -31527,7 +30643,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[78:79], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[88:89], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB49_4
+; SI-NEXT: s_cbranch_execnz .LBB49_3
; SI-NEXT: .LBB49_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[24:25], s[12:13], 1.0
; SI-NEXT: v_add_f64 v[22:23], s[14:15], 1.0
@@ -31573,36 +30689,8 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v1
-; SI-NEXT: s_branch .LBB49_5
+; SI-NEXT: s_branch .LBB49_4
; SI-NEXT: .LBB49_3:
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: s_branch .LBB49_2
-; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -31660,7 +30748,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v28, s44
; SI-NEXT: v_mov_b32_e32 v27, s42
; SI-NEXT: v_mov_b32_e32 v26, s40
-; SI-NEXT: .LBB49_5: ; %end
+; SI-NEXT: .LBB49_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v38
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37
@@ -31780,7 +30868,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 ; 4-byte Folded Spill
-; VI-NEXT: s_cbranch_scc0 .LBB49_3
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s42, s9, 16
; VI-NEXT: s_lshr_b32 s63, s8, 16
@@ -31808,7 +30896,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; VI-NEXT: s_lshr_b32 s90, s18, 16
; VI-NEXT: s_lshr_b32 s62, s17, 16
; VI-NEXT: s_lshr_b32 s91, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB49_4
+; VI-NEXT: s_cbranch_execnz .LBB49_3
; VI-NEXT: .LBB49_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[24:25], s[8:9], 1.0
; VI-NEXT: v_add_f64 v[22:23], s[10:11], 1.0
@@ -31849,36 +30937,8 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v54, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v43, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v40, 16, v0
-; VI-NEXT: s_branch .LBB49_5
+; VI-NEXT: s_branch .LBB49_4
; VI-NEXT: .LBB49_3:
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: s_branch .LBB49_2
-; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -31931,7 +30991,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v33, s44
; VI-NEXT: v_mov_b32_e32 v31, s43
; VI-NEXT: v_mov_b32_e32 v28, s42
-; VI-NEXT: .LBB49_5: ; %end
+; VI-NEXT: .LBB49_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v40, 16, v40
; VI-NEXT: v_or_b32_sdwa v0, v0, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v40, 16, v43
@@ -32012,7 +31072,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s42, s9, 16
; GFX9-NEXT: s_lshr_b32 s63, s8, 16
@@ -32040,7 +31100,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX9-NEXT: s_lshr_b32 s90, s18, 16
; GFX9-NEXT: s_lshr_b32 s62, s17, 16
; GFX9-NEXT: s_lshr_b32 s91, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
+; GFX9-NEXT: s_cbranch_execnz .LBB49_3
; GFX9-NEXT: .LBB49_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[24:25], s[8:9], 1.0
; GFX9-NEXT: v_add_f64 v[22:23], s[10:11], 1.0
@@ -32081,36 +31141,8 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v40, 16, v0
-; GFX9-NEXT: s_branch .LBB49_5
+; GFX9-NEXT: s_branch .LBB49_4
; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: s_branch .LBB49_2
-; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -32163,7 +31195,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v33, s44
; GFX9-NEXT: v_mov_b32_e32 v31, s43
; GFX9-NEXT: v_mov_b32_e32 v28, s42
-; GFX9-NEXT: .LBB49_5: ; %end
+; GFX9-NEXT: .LBB49_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff, v3
@@ -32237,7 +31269,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s10, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s12, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s9, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s58, s8, 16
@@ -32266,7 +31298,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s57, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s78, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], s[8:9], 1.0
; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], s[6:7], 1.0
@@ -32307,36 +31339,8 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB49_5
+; GFX11-TRUE16-NEXT: s_branch .LBB49_4
; GFX11-TRUE16-NEXT: .LBB49_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: s_branch .LBB49_2
-; GFX11-TRUE16-NEXT: .LBB49_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -32363,7 +31367,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s42 :: v_dual_mov_b32 v35, s41
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s40 :: v_dual_mov_b32 v31, s15
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, s14 :: v_dual_mov_b32 v28, s13
-; GFX11-TRUE16-NEXT: .LBB49_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB49_4: ; %end
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, v48 :: v_dual_mov_b32 v37, v37
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v30 :: v_dual_mov_b32 v65, v65
@@ -32422,7 +31426,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s10, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s12, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s9, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s58, s8, 16
@@ -32451,7 +31455,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s57, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s78, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f64 v[21:22], s[8:9], 1.0
; GFX11-FAKE16-NEXT: v_add_f64 v[23:24], s[6:7], 1.0
@@ -32492,36 +31496,8 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v2
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v5
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v4
-; GFX11-FAKE16-NEXT: s_branch .LBB49_5
+; GFX11-FAKE16-NEXT: s_branch .LBB49_4
; GFX11-FAKE16-NEXT: .LBB49_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: s_branch .LBB49_2
-; GFX11-FAKE16-NEXT: .LBB49_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v27, s22
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v13, s24
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s16 :: v_dual_mov_b32 v11, s26
@@ -32548,7 +31524,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v48, s43 :: v_dual_mov_b32 v37, s41
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s40 :: v_dual_mov_b32 v33, s15
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s14 :: v_dual_mov_b32 v31, s13
-; GFX11-FAKE16-NEXT: .LBB49_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB49_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v68, 0xffff, v0
@@ -33844,7 +32820,7 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; SI-NEXT: s_lshr_b32 s34, s35, 16
; SI-NEXT: v_readfirstlane_b32 s4, v12
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB51_4
+; SI-NEXT: s_cbranch_scc0 .LBB51_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s83, 16
@@ -34116,9 +33092,6 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB51_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB51_2
;
; VI-LABEL: bitcast_v52i16_to_v13f64_scalar:
; VI: ; %bb.0:
@@ -34198,7 +33171,7 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s70, s71, 16
; VI-NEXT: v_readfirstlane_b32 s4, v12
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB51_4
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s87, 16
@@ -34474,9 +33447,6 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_4:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB51_2
;
; GFX9-LABEL: bitcast_v52i16_to_v13f64_scalar:
; GFX9: ; %bb.0:
@@ -34562,9 +33532,9 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s59, s59, s72
; GFX9-NEXT: s_pack_ll_b32_b16 s60, s60, s63
; GFX9-NEXT: s_pack_ll_b32_b16 s61, s61, s62
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_4
+; GFX9-NEXT: s_cbranch_execnz .LBB51_3
; GFX9-NEXT: .LBB51_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -34592,10 +33562,8 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v23, s59, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v24, s60, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v25, s61, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB51_5
+; GFX9-NEXT: s_branch .LBB51_4
; GFX9-NEXT: .LBB51_3:
-; GFX9-NEXT: s_branch .LBB51_2
-; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -34628,7 +33596,7 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB51_5: ; %end
+; GFX9-NEXT: .LBB51_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -34713,10 +33681,10 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s23, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s24, s47, s60
; GFX11-NEXT: s_pack_ll_b32_b16 s25, s46, s59
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-NEXT: .LBB51_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -34746,8 +33714,6 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v25, s25, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
-; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -35511,7 +34477,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: v_writelane_b32 v45, s48, 8
-; SI-NEXT: s_cbranch_scc0 .LBB53_3
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s48, s13, 16
; SI-NEXT: s_lshr_b32 s39, s15, 16
@@ -35539,7 +34505,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[78:79], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[88:89], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB53_4
+; SI-NEXT: s_cbranch_execnz .LBB53_3
; SI-NEXT: .LBB53_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[24:25], s[12:13], 1.0
; SI-NEXT: v_add_f64 v[22:23], s[14:15], 1.0
@@ -35585,36 +34551,8 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v1
-; SI-NEXT: s_branch .LBB53_5
+; SI-NEXT: s_branch .LBB53_4
; SI-NEXT: .LBB53_3:
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr93
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr95
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: s_branch .LBB53_2
-; SI-NEXT: .LBB53_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -35672,7 +34610,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; SI-NEXT: v_mov_b32_e32 v28, s44
; SI-NEXT: v_mov_b32_e32 v27, s42
; SI-NEXT: v_mov_b32_e32 v26, s40
-; SI-NEXT: .LBB53_5: ; %end
+; SI-NEXT: .LBB53_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v38
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37
@@ -35792,7 +34730,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 ; 4-byte Folded Spill
-; VI-NEXT: s_cbranch_scc0 .LBB53_3
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s42, s9, 16
; VI-NEXT: s_lshr_b32 s63, s8, 16
@@ -35820,7 +34758,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; VI-NEXT: s_lshr_b32 s90, s18, 16
; VI-NEXT: s_lshr_b32 s62, s17, 16
; VI-NEXT: s_lshr_b32 s91, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB53_4
+; VI-NEXT: s_cbranch_execnz .LBB53_3
; VI-NEXT: .LBB53_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[24:25], s[8:9], 1.0
; VI-NEXT: v_add_f64 v[22:23], s[10:11], 1.0
@@ -35861,36 +34799,8 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; VI-NEXT: v_lshrrev_b32_e32 v54, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v43, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v40, 16, v0
-; VI-NEXT: s_branch .LBB53_5
+; VI-NEXT: s_branch .LBB53_4
; VI-NEXT: .LBB53_3:
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: s_branch .LBB53_2
-; VI-NEXT: .LBB53_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -35943,7 +34853,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; VI-NEXT: v_mov_b32_e32 v33, s44
; VI-NEXT: v_mov_b32_e32 v31, s43
; VI-NEXT: v_mov_b32_e32 v28, s42
-; VI-NEXT: .LBB53_5: ; %end
+; VI-NEXT: .LBB53_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v40, 16, v40
; VI-NEXT: v_or_b32_sdwa v0, v0, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v40, 16, v43
@@ -36024,7 +34934,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s42, s9, 16
; GFX9-NEXT: s_lshr_b32 s63, s8, 16
@@ -36052,7 +34962,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX9-NEXT: s_lshr_b32 s90, s18, 16
; GFX9-NEXT: s_lshr_b32 s62, s17, 16
; GFX9-NEXT: s_lshr_b32 s91, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB53_4
+; GFX9-NEXT: s_cbranch_execnz .LBB53_3
; GFX9-NEXT: .LBB53_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[24:25], s[8:9], 1.0
; GFX9-NEXT: v_add_f64 v[22:23], s[10:11], 1.0
@@ -36093,36 +35003,8 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v40, 16, v0
-; GFX9-NEXT: s_branch .LBB53_5
+; GFX9-NEXT: s_branch .LBB53_4
; GFX9-NEXT: .LBB53_3:
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: s_branch .LBB53_2
-; GFX9-NEXT: .LBB53_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -36175,7 +35057,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v33, s44
; GFX9-NEXT: v_mov_b32_e32 v31, s43
; GFX9-NEXT: v_mov_b32_e32 v28, s42
-; GFX9-NEXT: .LBB53_5: ; %end
+; GFX9-NEXT: .LBB53_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff, v3
@@ -36249,7 +35131,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s10, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s12, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s9, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s58, s8, 16
@@ -36278,7 +35160,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX11-TRUE16-NEXT: s_lshr_b32 s57, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s78, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], s[8:9], 1.0
; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], s[6:7], 1.0
@@ -36319,36 +35201,8 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB53_5
+; GFX11-TRUE16-NEXT: s_branch .LBB53_4
; GFX11-TRUE16-NEXT: .LBB53_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: s_branch .LBB53_2
-; GFX11-TRUE16-NEXT: .LBB53_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -36375,7 +35229,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s42 :: v_dual_mov_b32 v35, s41
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s40 :: v_dual_mov_b32 v31, s15
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, s14 :: v_dual_mov_b32 v28, s13
-; GFX11-TRUE16-NEXT: .LBB53_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB53_4: ; %end
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, v48 :: v_dual_mov_b32 v37, v37
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v30 :: v_dual_mov_b32 v65, v65
@@ -36434,7 +35288,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s10, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s12, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s9, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s58, s8, 16
@@ -36463,7 +35317,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX11-FAKE16-NEXT: s_lshr_b32 s57, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s78, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f64 v[21:22], s[8:9], 1.0
; GFX11-FAKE16-NEXT: v_add_f64 v[23:24], s[6:7], 1.0
@@ -36504,36 +35358,8 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v2
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v5
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v4
-; GFX11-FAKE16-NEXT: s_branch .LBB53_5
+; GFX11-FAKE16-NEXT: s_branch .LBB53_4
; GFX11-FAKE16-NEXT: .LBB53_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: s_branch .LBB53_2
-; GFX11-FAKE16-NEXT: .LBB53_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v27, s22
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v13, s24
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s16 :: v_dual_mov_b32 v11, s26
@@ -36560,7 +35386,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v48, s43 :: v_dual_mov_b32 v37, s41
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s40 :: v_dual_mov_b32 v33, s15
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s14 :: v_dual_mov_b32 v31, s13
-; GFX11-FAKE16-NEXT: .LBB53_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB53_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v68, 0xffff, v0
@@ -37977,7 +36803,7 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: s_lshr_b32 s95, s94, 16
; SI-NEXT: v_readfirstlane_b32 s4, v12
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s83, 16
@@ -38057,7 +36883,7 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s61, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB55_4
+; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s83
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -38267,11 +37093,8 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: v_or_b32_e32 v24, v25, v24
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v26
; SI-NEXT: v_or_b32_e32 v25, v27, v25
-; SI-NEXT: s_branch .LBB55_5
+; SI-NEXT: s_branch .LBB55_4
; SI-NEXT: .LBB55_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB55_2
-; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -38304,7 +37127,7 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB55_5: ; %end
+; SI-NEXT: .LBB55_4: ; %end
; SI-NEXT: v_readlane_b32 s83, v32, 27
; SI-NEXT: v_readlane_b32 s82, v32, 26
; SI-NEXT: v_readlane_b32 s81, v32, 25
@@ -38417,7 +37240,7 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; VI-NEXT: s_lshr_b32 s80, s71, 16
; VI-NEXT: v_readfirstlane_b32 s4, v12
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s87, 16
@@ -38497,7 +37320,7 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; VI-NEXT: s_and_b32 s4, 0xffff, s6
; VI-NEXT: s_lshl_b32 s5, s7, 16
; VI-NEXT: s_or_b32 s61, s4, s5
-; VI-NEXT: s_cbranch_execnz .LBB55_4
+; VI-NEXT: s_cbranch_execnz .LBB55_3
; VI-NEXT: .LBB55_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v25, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s87
@@ -38604,11 +37427,8 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; VI-NEXT: v_add_f16_sdwa v26, v26, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v25, s6, v25
; VI-NEXT: v_or_b32_e32 v25, v25, v26
-; VI-NEXT: s_branch .LBB55_5
+; VI-NEXT: s_branch .LBB55_4
; VI-NEXT: .LBB55_3:
-; VI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; VI-NEXT: s_branch .LBB55_2
-; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -38641,7 +37461,7 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB55_5: ; %end
+; VI-NEXT: .LBB55_4: ; %end
; VI-NEXT: v_readlane_b32 s87, v32, 31
; VI-NEXT: v_readlane_b32 s86, v32, 30
; VI-NEXT: v_readlane_b32 s85, v32, 29
@@ -38764,9 +37584,9 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX9-NEXT: s_pack_ll_b32_b16 s59, s59, s72
; GFX9-NEXT: s_pack_ll_b32_b16 s60, s60, s63
; GFX9-NEXT: s_pack_ll_b32_b16 s61, s61, s62
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
+; GFX9-NEXT: s_cbranch_execnz .LBB55_3
; GFX9-NEXT: .LBB55_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v25, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v25 op_sel_hi:[1,0]
@@ -38795,10 +37615,8 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX9-NEXT: v_pk_add_f16 v23, s59, v25 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v24, s60, v25 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v25, s61, v25 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB55_5
+; GFX9-NEXT: s_branch .LBB55_4
; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
-; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -38831,7 +37649,7 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB55_5: ; %end
+; GFX9-NEXT: .LBB55_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -38916,10 +37734,10 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-NEXT: s_pack_ll_b32_b16 s23, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s24, s47, s60
; GFX11-NEXT: s_pack_ll_b32_b16 s25, s46, s59
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-NEXT: .LBB55_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -38949,8 +37767,6 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-NEXT: v_pk_add_f16 v25, 0x200, s25 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
-; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -40295,7 +39111,7 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; SI-NEXT: s_lshr_b32 s93, s86, 16
; SI-NEXT: v_readfirstlane_b32 s4, v12
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB57_4
+; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s5, s17, 0xffff
; SI-NEXT: s_lshl_b32 s7, s48, 16
@@ -40725,34 +39541,6 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB57_4:
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: s_branch .LBB57_2
;
; VI-LABEL: bitcast_v52i16_to_v52f16_scalar:
; VI: ; %bb.0:
@@ -40797,7 +39585,7 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; VI-NEXT: s_lshr_b32 s78, s77, 16
; VI-NEXT: v_readfirstlane_b32 s4, v12
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB57_3
; VI-NEXT: .LBB57_2: ; %cmp.true
@@ -40959,8 +39747,6 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v24, s8
; VI-NEXT: v_mov_b32_e32 v25, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v52i16_to_v52f16_scalar:
; GFX9: ; %bb.0:
@@ -41009,9 +39795,9 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
+; GFX9-NEXT: s_cbranch_execnz .LBB57_3
; GFX9-NEXT: .LBB57_2: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s91, s63
; GFX9-NEXT: v_pk_add_u16 v25, s4, 3 op_sel_hi:[1,0]
@@ -41091,10 +39877,8 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v23
; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v24
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX9-NEXT: s_branch .LBB57_5
+; GFX9-NEXT: s_branch .LBB57_4
; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
-; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v25, s91
; GFX9-NEXT: v_mov_b32_e32 v24, s90
; GFX9-NEXT: v_mov_b32_e32 v23, s89
@@ -41147,7 +39931,7 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v41, s8
; GFX9-NEXT: v_mov_b32_e32 v42, s7
; GFX9-NEXT: v_mov_b32_e32 v43, s6
-; GFX9-NEXT: .LBB57_5: ; %end
+; GFX9-NEXT: .LBB57_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -41247,10 +40031,10 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s62, 16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s78, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s78, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s78
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s57, s74, s57
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s47, s72, s47
@@ -41330,10 +40114,8 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-TRUE16-NEXT: s_branch .LBB57_5
+; GFX11-TRUE16-NEXT: s_branch .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB57_2
-; GFX11-TRUE16-NEXT: .LBB57_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, s77 :: v_dual_mov_b32 v24, s76
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, s74 :: v_dual_mov_b32 v22, s72
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, s75 :: v_dual_mov_b32 v20, s73
@@ -41360,7 +40142,7 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s9 :: v_dual_mov_b32 v55, s8
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, s6 :: v_dual_mov_b32 v65, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v66, s4 :: v_dual_mov_b32 v67, s7
-; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB57_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
@@ -41443,10 +40225,10 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s62, 16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s78, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s78, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s78
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-FAKE16-NEXT: .LBB57_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s57, s74, s57
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s47, s72, s47
@@ -41526,10 +40308,8 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v22
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
-; GFX11-FAKE16-NEXT: s_branch .LBB57_5
+; GFX11-FAKE16-NEXT: s_branch .LBB57_4
; GFX11-FAKE16-NEXT: .LBB57_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB57_2
-; GFX11-FAKE16-NEXT: .LBB57_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s77 :: v_dual_mov_b32 v22, s76
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s74 :: v_dual_mov_b32 v24, s72
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s75 :: v_dual_mov_b32 v16, s73
@@ -41556,7 +40336,7 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v54, s9 :: v_dual_mov_b32 v55, s8
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v64, s6 :: v_dual_mov_b32 v65, s5
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v66, s4 :: v_dual_mov_b32 v67, s7
-; GFX11-FAKE16-NEXT: .LBB57_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB57_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v69, 0xffff, v0
@@ -42561,9 +41341,9 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
-; SI-NEXT: s_cbranch_scc0 .LBB59_3
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_4
+; SI-NEXT: s_cbranch_execnz .LBB59_3
; SI-NEXT: .LBB59_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s91
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -42803,10 +41583,8 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
; SI-NEXT: v_lshr_b64 v[42:43], v[0:1], 16
-; SI-NEXT: s_branch .LBB59_5
+; SI-NEXT: s_branch .LBB59_4
; SI-NEXT: .LBB59_3:
-; SI-NEXT: s_branch .LBB59_2
-; SI-NEXT: .LBB59_4:
; SI-NEXT: v_mov_b32_e32 v45, s61
; SI-NEXT: v_mov_b32_e32 v47, s59
; SI-NEXT: v_mov_b32_e32 v29, s56
@@ -42865,7 +41643,7 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v30, s58
; SI-NEXT: v_mov_b32_e32 v55, s47
; SI-NEXT: v_mov_b32_e32 v26, s44
-; SI-NEXT: .LBB59_5: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v42
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; SI-NEXT: v_or_b32_e32 v0, v2, v0
@@ -43011,9 +41789,9 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 ; 4-byte Folded Spill
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
+; VI-NEXT: s_cbranch_execnz .LBB59_3
; VI-NEXT: .LBB59_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v26, 0x200
; VI-NEXT: v_add_f16_e32 v0, s16, v26
@@ -43068,10 +41846,8 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v27, s47, v26
; VI-NEXT: v_add_f16_e32 v25, s44, v26
; VI-NEXT: v_add_f16_e32 v26, s45, v26
-; VI-NEXT: s_branch .LBB59_5
+; VI-NEXT: s_branch .LBB59_4
; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
-; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v26, s45
; VI-NEXT: v_mov_b32_e32 v25, s44
; VI-NEXT: v_mov_b32_e32 v27, s47
@@ -43124,7 +41900,7 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v43, s43
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: .LBB59_5: ; %end
+; VI-NEXT: .LBB59_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v43, 16, v43
; VI-NEXT: v_lshlrev_b32_e32 v42, 16, v42
; VI-NEXT: v_lshlrev_b32_e32 v41, 16, v41
@@ -43231,9 +42007,9 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
+; GFX9-NEXT: s_cbranch_execnz .LBB59_3
; GFX9-NEXT: .LBB59_2: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s91, s63
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
@@ -43314,10 +42090,8 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v23
; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v24
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX9-NEXT: s_branch .LBB59_5
+; GFX9-NEXT: s_branch .LBB59_4
; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
-; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v25, s91
; GFX9-NEXT: v_mov_b32_e32 v24, s90
; GFX9-NEXT: v_mov_b32_e32 v23, s89
@@ -43370,7 +42144,7 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v41, s8
; GFX9-NEXT: v_mov_b32_e32 v42, s7
; GFX9-NEXT: v_mov_b32_e32 v43, s6
-; GFX9-NEXT: .LBB59_5: ; %end
+; GFX9-NEXT: .LBB59_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -43470,10 +42244,10 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s62, 16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s78, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s78, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s78
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s57, s74, s57
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s47, s72, s47
@@ -43553,10 +42327,8 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-TRUE16-NEXT: s_branch .LBB59_5
+; GFX11-TRUE16-NEXT: s_branch .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB59_2
-; GFX11-TRUE16-NEXT: .LBB59_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, s77 :: v_dual_mov_b32 v24, s76
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, s74 :: v_dual_mov_b32 v22, s72
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, s75 :: v_dual_mov_b32 v20, s73
@@ -43583,7 +42355,7 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s9 :: v_dual_mov_b32 v55, s8
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, s6 :: v_dual_mov_b32 v65, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v66, s4 :: v_dual_mov_b32 v67, s7
-; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB59_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
@@ -43666,10 +42438,10 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s62, 16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s78, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s78, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s78
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-FAKE16-NEXT: .LBB59_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s57, s74, s57
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s47, s72, s47
@@ -43749,10 +42521,8 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v22
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
-; GFX11-FAKE16-NEXT: s_branch .LBB59_5
+; GFX11-FAKE16-NEXT: s_branch .LBB59_4
; GFX11-FAKE16-NEXT: .LBB59_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB59_2
-; GFX11-FAKE16-NEXT: .LBB59_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s77 :: v_dual_mov_b32 v22, s76
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s74 :: v_dual_mov_b32 v24, s72
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s75 :: v_dual_mov_b32 v16, s73
@@ -43779,7 +42549,7 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v54, s9 :: v_dual_mov_b32 v55, s8
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v64, s6 :: v_dual_mov_b32 v65, s5
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v66, s4 :: v_dual_mov_b32 v67, s7
-; GFX11-FAKE16-NEXT: .LBB59_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB59_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v69, 0xffff, v0
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
index b4b888e2111f4..2e37b39005ebf 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
@@ -208,7 +208,7 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s42, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s43, v0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -270,8 +270,6 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v26, s7
; SI-NEXT: v_mov_b32_e32 v27, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v28i32_to_v28f32_scalar:
; VI: ; %bb.0:
@@ -292,7 +290,7 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s42, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s43, v0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -354,8 +352,6 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v26, s7
; VI-NEXT: v_mov_b32_e32 v27, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v28i32_to_v28f32_scalar:
; GFX9: ; %bb.0:
@@ -376,7 +372,7 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s42, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s43, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -438,8 +434,6 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v26, s7
; GFX9-NEXT: v_mov_b32_e32 v27, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v28i32_to_v28f32_scalar:
; GFX11: ; %bb.0:
@@ -457,7 +451,7 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s13, v0
; GFX11-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-NEXT: s_mov_b32 s14, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -507,8 +501,6 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v24, s7 :: v_dual_mov_b32 v25, s6
; GFX11-NEXT: v_dual_mov_b32 v26, s5 :: v_dual_mov_b32 v27, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -744,9 +736,9 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v27, s63, 1.0
; SI-NEXT: v_add_f32_e64 v26, s62, 1.0
@@ -776,10 +768,8 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB3_5
+; SI-NEXT: s_branch .LBB3_4
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -812,7 +802,7 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB3_5: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -879,9 +869,9 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v27, s63, 1.0
; VI-NEXT: v_add_f32_e64 v26, s62, 1.0
@@ -911,10 +901,8 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB3_5
+; VI-NEXT: s_branch .LBB3_4
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -947,7 +935,7 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB3_5: ; %end
+; VI-NEXT: .LBB3_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -1014,9 +1002,9 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v27, s63, 1.0
; GFX9-NEXT: v_add_f32_e64 v26, s62, 1.0
@@ -1046,10 +1034,8 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB3_5
+; GFX9-NEXT: s_branch .LBB3_4
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -1082,7 +1068,7 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB3_5: ; %end
+; GFX9-NEXT: .LBB3_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -1150,10 +1136,10 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v27, s63, 1.0
; GFX11-NEXT: v_add_f32_e64 v26, s62, 1.0
@@ -1183,10 +1169,8 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB3_5
+; GFX11-NEXT: s_branch .LBB3_4
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -1203,7 +1187,7 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB3_5: ; %end
+; GFX11-NEXT: .LBB3_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -1440,7 +1424,7 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s42, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s43, v0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB5_3
; SI-NEXT: .LBB5_2: ; %cmp.true
@@ -1502,8 +1486,6 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v26, s7
; SI-NEXT: v_mov_b32_e32 v27, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v28i32_to_v14i64_scalar:
; VI: ; %bb.0:
@@ -1524,7 +1506,7 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s42, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s43, v0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -1586,8 +1568,6 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v26, s7
; VI-NEXT: v_mov_b32_e32 v27, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v28i32_to_v14i64_scalar:
; GFX9: ; %bb.0:
@@ -1608,7 +1588,7 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s42, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s43, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -1670,8 +1650,6 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v26, s7
; GFX9-NEXT: v_mov_b32_e32 v27, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v28i32_to_v14i64_scalar:
; GFX11: ; %bb.0:
@@ -1689,7 +1667,7 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s13, v0
; GFX11-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-NEXT: s_mov_b32 s14, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -1739,8 +1717,6 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v24, s7 :: v_dual_mov_b32 v25, s6
; GFX11-NEXT: v_dual_mov_b32 v26, s5 :: v_dual_mov_b32 v27, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1967,7 +1943,7 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s42, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s43, v0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB7_3
; SI-NEXT: .LBB7_2: ; %cmp.true
@@ -2029,8 +2005,6 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v26, s7
; SI-NEXT: v_mov_b32_e32 v27, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v14i64_to_v28i32_scalar:
; VI: ; %bb.0:
@@ -2051,7 +2025,7 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s42, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s43, v0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -2113,8 +2087,6 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v26, s7
; VI-NEXT: v_mov_b32_e32 v27, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v14i64_to_v28i32_scalar:
; GFX9: ; %bb.0:
@@ -2135,7 +2107,7 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s42, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s43, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
@@ -2197,8 +2169,6 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v26, s7
; GFX9-NEXT: v_mov_b32_e32 v27, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v14i64_to_v28i32_scalar:
; GFX11: ; %bb.0:
@@ -2216,7 +2186,7 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s13, v0
; GFX11-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-NEXT: s_mov_b32 s14, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
@@ -2266,8 +2236,6 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v24, s7 :: v_dual_mov_b32 v25, s6
; GFX11-NEXT: v_dual_mov_b32 v26, s5 :: v_dual_mov_b32 v27, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2487,7 +2455,7 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s42, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s43, v0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB9_3
; SI-NEXT: .LBB9_2: ; %cmp.true
@@ -2549,8 +2517,6 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v26, s7
; SI-NEXT: v_mov_b32_e32 v27, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v28i32_to_v14f64_scalar:
; VI: ; %bb.0:
@@ -2571,7 +2537,7 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s42, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s43, v0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -2633,8 +2599,6 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v26, s7
; VI-NEXT: v_mov_b32_e32 v27, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v28i32_to_v14f64_scalar:
; GFX9: ; %bb.0:
@@ -2655,7 +2619,7 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s42, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s43, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -2717,8 +2681,6 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v26, s7
; GFX9-NEXT: v_mov_b32_e32 v27, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v28i32_to_v14f64_scalar:
; GFX11: ; %bb.0:
@@ -2736,7 +2698,7 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s13, v0
; GFX11-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-NEXT: s_mov_b32 s14, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -2786,8 +2748,6 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v24, s7 :: v_dual_mov_b32 v25, s6
; GFX11-NEXT: v_dual_mov_b32 v26, s5 :: v_dual_mov_b32 v27, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2981,9 +2941,9 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
; SI-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
@@ -2999,10 +2959,8 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; SI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; SI-NEXT: s_branch .LBB11_5
+; SI-NEXT: s_branch .LBB11_4
; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -3035,7 +2993,7 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB11_5: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -3102,9 +3060,9 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
; VI-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
@@ -3120,10 +3078,8 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB11_5
+; VI-NEXT: s_branch .LBB11_4
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -3156,7 +3112,7 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB11_5: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -3223,9 +3179,9 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
; GFX9-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
@@ -3241,10 +3197,8 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB11_5
+; GFX9-NEXT: s_branch .LBB11_4
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -3277,7 +3231,7 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB11_5: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -3345,10 +3299,10 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
; GFX11-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
@@ -3364,10 +3318,8 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB11_5
+; GFX11-NEXT: s_branch .LBB11_4
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -3384,7 +3336,7 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB11_5: ; %end
+; GFX11-NEXT: .LBB11_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -4293,7 +4245,7 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; SI-NEXT: s_cmp_lg_u32 s40, 0
; SI-NEXT: v_readfirstlane_b32 s40, v0
; SI-NEXT: v_writelane_b32 v28, s53, 13
-; SI-NEXT: s_cbranch_scc0 .LBB13_4
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s30, s5, 16
; SI-NEXT: s_lshr_b32 s31, s7, 16
@@ -4513,36 +4465,6 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB13_4:
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: s_branch .LBB13_2
;
; VI-LABEL: bitcast_v28i32_to_v56i16_scalar:
; VI: ; %bb.0:
@@ -4570,7 +4492,7 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s43, v0
; VI-NEXT: v_writelane_b32 v28, s35, 3
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s44, s6, 16
; VI-NEXT: s_lshr_b32 s45, s7, 16
@@ -4780,36 +4702,6 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v28i32_to_v56i16_scalar:
; GFX9: ; %bb.0:
@@ -4830,7 +4722,7 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s42, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s43, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s44, s6, 16
; GFX9-NEXT: s_lshr_b32 s45, s7, 16
@@ -4976,36 +4868,6 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v26, s7
; GFX9-NEXT: v_mov_b32_e32 v27, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v28i32_to_v56i16_scalar:
; GFX11: ; %bb.0:
@@ -5023,7 +4885,7 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s13, v0
; GFX11-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-NEXT: s_mov_b32 s90, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s14, s4, 16
; GFX11-NEXT: s_lshr_b32 s15, s5, 16
@@ -5157,36 +5019,6 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v24, s7 :: v_dual_mov_b32 v25, s6
; GFX11-NEXT: v_dual_mov_b32 v26, s5 :: v_dual_mov_b32 v27, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: ; implicit-def: $sgpr89
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr79
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr77
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr75
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6533,7 +6365,7 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s71, s80, 16
; SI-NEXT: v_readfirstlane_b32 s4, v14
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s87, 16
@@ -6827,9 +6659,6 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v56i16_to_v28i32_scalar:
; VI: ; %bb.0:
@@ -7390,9 +7219,9 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s61, s61, s74
; GFX9-NEXT: s_pack_ll_b32_b16 s62, s62, s73
; GFX9-NEXT: s_pack_ll_b32_b16 s63, s63, s72
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -7422,10 +7251,8 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v25, s61, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v26, s62, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v27, s63, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB15_5
+; GFX9-NEXT: s_branch .LBB15_4
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -7458,7 +7285,7 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB15_5: ; %end
+; GFX9-NEXT: .LBB15_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -7549,10 +7376,10 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s25, s58, s63
; GFX11-NEXT: s_pack_ll_b32_b16 s26, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s27, s44, s57
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -7584,8 +7411,6 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v27, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -8494,7 +8319,7 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; SI-NEXT: s_cmp_lg_u32 s40, 0
; SI-NEXT: v_readfirstlane_b32 s40, v0
; SI-NEXT: v_writelane_b32 v28, s53, 13
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s30, s5, 16
; SI-NEXT: s_lshr_b32 s31, s7, 16
@@ -8714,36 +8539,6 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_v28i32_to_v56f16_scalar:
; VI: ; %bb.0:
@@ -8771,7 +8566,7 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s43, v0
; VI-NEXT: v_writelane_b32 v28, s35, 3
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s44, s6, 16
; VI-NEXT: s_lshr_b32 s45, s7, 16
@@ -8981,36 +8776,6 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v28i32_to_v56f16_scalar:
; GFX9: ; %bb.0:
@@ -9031,7 +8796,7 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s42, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s43, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s44, s6, 16
; GFX9-NEXT: s_lshr_b32 s45, s7, 16
@@ -9177,36 +8942,6 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v26, s7
; GFX9-NEXT: v_mov_b32_e32 v27, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v28i32_to_v56f16_scalar:
; GFX11: ; %bb.0:
@@ -9224,7 +8959,7 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s13, v0
; GFX11-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-NEXT: s_mov_b32 s90, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s14, s4, 16
; GFX11-NEXT: s_lshr_b32 s15, s5, 16
@@ -9358,36 +9093,6 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v24, s7 :: v_dual_mov_b32 v25, s6
; GFX11-NEXT: v_dual_mov_b32 v26, s5 :: v_dual_mov_b32 v27, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: ; implicit-def: $sgpr89
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr79
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr77
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr75
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10871,7 +10576,7 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s68, s34, 16
; SI-NEXT: v_readfirstlane_b32 s4, v14
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s87, 16
@@ -10957,7 +10662,7 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s63, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s87
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -11183,11 +10888,8 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v26, v27, v26
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v28
; SI-NEXT: v_or_b32_e32 v27, v29, v27
-; SI-NEXT: s_branch .LBB19_5
+; SI-NEXT: s_branch .LBB19_4
; SI-NEXT: .LBB19_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -11220,7 +10922,7 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB19_5: ; %end
+; SI-NEXT: .LBB19_4: ; %end
; SI-NEXT: v_readlane_b32 s87, v32, 31
; SI-NEXT: v_readlane_b32 s86, v32, 30
; SI-NEXT: v_readlane_b32 s85, v32, 29
@@ -11731,9 +11433,9 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s61, s61, s74
; GFX9-NEXT: s_pack_ll_b32_b16 s62, s62, s73
; GFX9-NEXT: s_pack_ll_b32_b16 s63, s63, s72
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v27, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v27 op_sel_hi:[1,0]
@@ -11764,10 +11466,8 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v25, s61, v27 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v26, s62, v27 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v27, s63, v27 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB19_5
+; GFX9-NEXT: s_branch .LBB19_4
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -11800,7 +11500,7 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB19_5: ; %end
+; GFX9-NEXT: .LBB19_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -11891,10 +11591,10 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s25, s58, s63
; GFX11-NEXT: s_pack_ll_b32_b16 s26, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s27, s44, s57
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -11926,8 +11626,6 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v27, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -12180,9 +11878,9 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB21_3
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_4
+; SI-NEXT: s_cbranch_execnz .LBB21_3
; SI-NEXT: .LBB21_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v27, s63, 1.0
; SI-NEXT: v_add_f32_e64 v26, s62, 1.0
@@ -12212,10 +11910,8 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB21_5
+; SI-NEXT: s_branch .LBB21_4
; SI-NEXT: .LBB21_3:
-; SI-NEXT: s_branch .LBB21_2
-; SI-NEXT: .LBB21_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -12248,7 +11944,7 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB21_5: ; %end
+; SI-NEXT: .LBB21_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -12315,9 +12011,9 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB21_3
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_4
+; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v27, s63, 1.0
; VI-NEXT: v_add_f32_e64 v26, s62, 1.0
@@ -12347,10 +12043,8 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB21_5
+; VI-NEXT: s_branch .LBB21_4
; VI-NEXT: .LBB21_3:
-; VI-NEXT: s_branch .LBB21_2
-; VI-NEXT: .LBB21_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -12383,7 +12077,7 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB21_5: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -12450,9 +12144,9 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
+; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v27, s63, 1.0
; GFX9-NEXT: v_add_f32_e64 v26, s62, 1.0
@@ -12482,10 +12176,8 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB21_5
+; GFX9-NEXT: s_branch .LBB21_4
; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
-; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -12518,7 +12210,7 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB21_5: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -12586,10 +12278,10 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
; GFX11-NEXT: .LBB21_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v27, s63, 1.0
; GFX11-NEXT: v_add_f32_e64 v26, s62, 1.0
@@ -12619,10 +12311,8 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB21_5
+; GFX11-NEXT: s_branch .LBB21_4
; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
-; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -12639,7 +12329,7 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB21_5: ; %end
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -12883,7 +12573,7 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s42, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s43, v0
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB23_3
; SI-NEXT: .LBB23_2: ; %cmp.true
@@ -12945,8 +12635,6 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v26, s7
; SI-NEXT: v_mov_b32_e32 v27, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v14i64_to_v28f32_scalar:
; VI: ; %bb.0:
@@ -12967,7 +12655,7 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s42, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s43, v0
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
@@ -13029,8 +12717,6 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v26, s7
; VI-NEXT: v_mov_b32_e32 v27, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v14i64_to_v28f32_scalar:
; GFX9: ; %bb.0:
@@ -13051,7 +12737,7 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s42, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s43, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
@@ -13113,8 +12799,6 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v26, s7
; GFX9-NEXT: v_mov_b32_e32 v27, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v14i64_to_v28f32_scalar:
; GFX11: ; %bb.0:
@@ -13132,7 +12816,7 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s13, v0
; GFX11-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-NEXT: s_mov_b32 s14, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
@@ -13182,8 +12866,6 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v24, s7 :: v_dual_mov_b32 v25, s6
; GFX11-NEXT: v_dual_mov_b32 v26, s5 :: v_dual_mov_b32 v27, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: s_branch .LBB23_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13419,9 +13101,9 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB25_3
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_4
+; SI-NEXT: s_cbranch_execnz .LBB25_3
; SI-NEXT: .LBB25_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v27, s63, 1.0
; SI-NEXT: v_add_f32_e64 v26, s62, 1.0
@@ -13451,10 +13133,8 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB25_5
+; SI-NEXT: s_branch .LBB25_4
; SI-NEXT: .LBB25_3:
-; SI-NEXT: s_branch .LBB25_2
-; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -13487,7 +13167,7 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB25_5: ; %end
+; SI-NEXT: .LBB25_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -13554,9 +13234,9 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
+; VI-NEXT: s_cbranch_execnz .LBB25_3
; VI-NEXT: .LBB25_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v27, s63, 1.0
; VI-NEXT: v_add_f32_e64 v26, s62, 1.0
@@ -13586,10 +13266,8 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB25_5
+; VI-NEXT: s_branch .LBB25_4
; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
-; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -13622,7 +13300,7 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB25_5: ; %end
+; VI-NEXT: .LBB25_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -13689,9 +13367,9 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
+; GFX9-NEXT: s_cbranch_execnz .LBB25_3
; GFX9-NEXT: .LBB25_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v27, s63, 1.0
; GFX9-NEXT: v_add_f32_e64 v26, s62, 1.0
@@ -13721,10 +13399,8 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB25_5
+; GFX9-NEXT: s_branch .LBB25_4
; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
-; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -13757,7 +13433,7 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB25_5: ; %end
+; GFX9-NEXT: .LBB25_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -13825,10 +13501,10 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_3
; GFX11-NEXT: .LBB25_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v27, s63, 1.0
; GFX11-NEXT: v_add_f32_e64 v26, s62, 1.0
@@ -13858,10 +13534,8 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB25_5
+; GFX11-NEXT: s_branch .LBB25_4
; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
-; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -13878,7 +13552,7 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB25_5: ; %end
+; GFX11-NEXT: .LBB25_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -14089,9 +13763,9 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB27_3
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_4
+; SI-NEXT: s_cbranch_execnz .LBB27_3
; SI-NEXT: .LBB27_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
; SI-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
@@ -14107,10 +13781,8 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; SI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; SI-NEXT: s_branch .LBB27_5
+; SI-NEXT: s_branch .LBB27_4
; SI-NEXT: .LBB27_3:
-; SI-NEXT: s_branch .LBB27_2
-; SI-NEXT: .LBB27_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -14143,7 +13815,7 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB27_5: ; %end
+; SI-NEXT: .LBB27_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -14210,9 +13882,9 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB27_3
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_4
+; VI-NEXT: s_cbranch_execnz .LBB27_3
; VI-NEXT: .LBB27_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
; VI-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
@@ -14228,10 +13900,8 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; VI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB27_5
+; VI-NEXT: s_branch .LBB27_4
; VI-NEXT: .LBB27_3:
-; VI-NEXT: s_branch .LBB27_2
-; VI-NEXT: .LBB27_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -14264,7 +13934,7 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB27_5: ; %end
+; VI-NEXT: .LBB27_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -14331,9 +14001,9 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_4
+; GFX9-NEXT: s_cbranch_execnz .LBB27_3
; GFX9-NEXT: .LBB27_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
; GFX9-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
@@ -14349,10 +14019,8 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; GFX9-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB27_5
+; GFX9-NEXT: s_branch .LBB27_4
; GFX9-NEXT: .LBB27_3:
-; GFX9-NEXT: s_branch .LBB27_2
-; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -14385,7 +14053,7 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB27_5: ; %end
+; GFX9-NEXT: .LBB27_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -14453,10 +14121,10 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_3
; GFX11-NEXT: .LBB27_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
; GFX11-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
@@ -14472,10 +14140,8 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; GFX11-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB27_5
+; GFX11-NEXT: s_branch .LBB27_4
; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
-; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -14492,7 +14158,7 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB27_5: ; %end
+; GFX11-NEXT: .LBB27_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -15383,7 +15049,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: v_writelane_b32 v58, s53, 13
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s53, s5, 16
; SI-NEXT: s_lshr_b32 s52, s7, 16
@@ -15413,7 +15079,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[92:93], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB29_4
+; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v27, s5, 1.0
; SI-NEXT: v_add_f32_e64 v26, s4, 1.0
@@ -15478,38 +15144,8 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v1
-; SI-NEXT: s_branch .LBB29_5
+; SI-NEXT: s_branch .LBB29_4
; SI-NEXT: .LBB29_3:
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: s_branch .LBB29_2
-; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -15567,7 +15203,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v48, s88
; SI-NEXT: v_mov_b32_e32 v49, s90
; SI-NEXT: v_mov_b32_e32 v50, s92
-; SI-NEXT: .LBB29_5: ; %end
+; SI-NEXT: .LBB29_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v50
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v0, v39
@@ -15716,7 +15352,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: v_writelane_b32 v56, s35, 3
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s44, s6, 16
; VI-NEXT: s_lshr_b32 s45, s7, 16
@@ -15746,7 +15382,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; VI-NEXT: s_lshr_b32 s31, s18, 16
; VI-NEXT: s_lshr_b32 s34, s17, 16
; VI-NEXT: s_lshr_b32 s35, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB29_4
+; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v27, s6, 1.0
; VI-NEXT: v_add_f32_e64 v26, s7, 1.0
@@ -15804,38 +15440,8 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v45, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v46, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v47, 16, v0
-; VI-NEXT: s_branch .LBB29_5
+; VI-NEXT: s_branch .LBB29_4
; VI-NEXT: .LBB29_3:
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: s_branch .LBB29_2
-; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -15892,7 +15498,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v30, s46
; VI-NEXT: v_mov_b32_e32 v29, s45
; VI-NEXT: v_mov_b32_e32 v28, s44
-; VI-NEXT: .LBB29_5: ; %end
+; VI-NEXT: .LBB29_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v47, 16, v47
; VI-NEXT: v_lshlrev_b32_e32 v46, 16, v46
; VI-NEXT: v_lshlrev_b32_e32 v45, 16, v45
@@ -15994,7 +15600,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s44, s6, 16
; GFX9-NEXT: s_lshr_b32 s45, s7, 16
@@ -16024,7 +15630,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: s_lshr_b32 s93, s18, 16
; GFX9-NEXT: s_lshr_b32 s94, s17, 16
; GFX9-NEXT: s_lshr_b32 s95, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
+; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v27, s6, 1.0
; GFX9-NEXT: v_add_f32_e64 v26, s7, 1.0
@@ -16082,38 +15688,8 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v47, 16, v0
-; GFX9-NEXT: s_branch .LBB29_5
+; GFX9-NEXT: s_branch .LBB29_4
; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: s_branch .LBB29_2
-; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -16170,7 +15746,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v30, s46
; GFX9-NEXT: v_mov_b32_e32 v29, s45
; GFX9-NEXT: v_mov_b32_e32 v28, s44
-; GFX9-NEXT: .LBB29_5: ; %end
+; GFX9-NEXT: .LBB29_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -16254,7 +15830,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s13, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s14, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s4, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s5, 16
@@ -16285,7 +15861,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s89, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s90, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f32_e64 v27, s4, 1.0
; GFX11-TRUE16-NEXT: v_add_f32_e64 v26, s5, 1.0
@@ -16343,38 +15919,8 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB29_5
+; GFX11-TRUE16-NEXT: s_branch .LBB29_4
; GFX11-TRUE16-NEXT: .LBB29_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr89
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr79
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB29_2
-; GFX11-TRUE16-NEXT: .LBB29_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -16403,7 +15949,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
-; GFX11-TRUE16-NEXT: .LBB29_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB29_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
@@ -16465,7 +16011,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s13, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s14, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s4, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s5, 16
@@ -16496,7 +16042,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s89, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s90, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f32_e64 v23, s4, 1.0
; GFX11-FAKE16-NEXT: v_add_f32_e64 v24, s5, 1.0
@@ -16554,38 +16100,8 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v2
-; GFX11-FAKE16-NEXT: s_branch .LBB29_5
+; GFX11-FAKE16-NEXT: s_branch .LBB29_4
; GFX11-FAKE16-NEXT: .LBB29_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr89
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr79
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB29_2
-; GFX11-FAKE16-NEXT: .LBB29_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v7, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s16 :: v_dual_mov_b32 v5, s17
@@ -16614,7 +16130,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
-; GFX11-FAKE16-NEXT: .LBB29_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB29_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v80, 0xffff, v0
@@ -18018,7 +17534,7 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; SI-NEXT: s_lshr_b32 s71, s80, 16
; SI-NEXT: v_readfirstlane_b32 s4, v14
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s87, 16
@@ -18312,9 +17828,6 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v56i16_to_v28f32_scalar:
; VI: ; %bb.0:
@@ -18875,9 +18388,9 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s61, s61, s74
; GFX9-NEXT: s_pack_ll_b32_b16 s62, s62, s73
; GFX9-NEXT: s_pack_ll_b32_b16 s63, s63, s72
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
+; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -18907,10 +18420,8 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v25, s61, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v26, s62, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v27, s63, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB31_5
+; GFX9-NEXT: s_branch .LBB31_4
; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
-; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -18943,7 +18454,7 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB31_5: ; %end
+; GFX9-NEXT: .LBB31_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -19034,10 +18545,10 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s25, s58, s63
; GFX11-NEXT: s_pack_ll_b32_b16 s26, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s27, s44, s57
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-NEXT: .LBB31_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -19069,8 +18580,6 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v27, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
-; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -19961,7 +19470,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: v_writelane_b32 v58, s53, 13
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s53, s5, 16
; SI-NEXT: s_lshr_b32 s52, s7, 16
@@ -19991,7 +19500,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[92:93], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB33_4
+; SI-NEXT: s_cbranch_execnz .LBB33_3
; SI-NEXT: .LBB33_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v27, s5, 1.0
; SI-NEXT: v_add_f32_e64 v26, s4, 1.0
@@ -20056,38 +19565,8 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v1
-; SI-NEXT: s_branch .LBB33_5
+; SI-NEXT: s_branch .LBB33_4
; SI-NEXT: .LBB33_3:
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: s_branch .LBB33_2
-; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -20145,7 +19624,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v48, s88
; SI-NEXT: v_mov_b32_e32 v49, s90
; SI-NEXT: v_mov_b32_e32 v50, s92
-; SI-NEXT: .LBB33_5: ; %end
+; SI-NEXT: .LBB33_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v50
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v0, v39
@@ -20294,7 +19773,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: v_writelane_b32 v56, s35, 3
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s44, s6, 16
; VI-NEXT: s_lshr_b32 s45, s7, 16
@@ -20324,7 +19803,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; VI-NEXT: s_lshr_b32 s31, s18, 16
; VI-NEXT: s_lshr_b32 s34, s17, 16
; VI-NEXT: s_lshr_b32 s35, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB33_4
+; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v27, s6, 1.0
; VI-NEXT: v_add_f32_e64 v26, s7, 1.0
@@ -20382,38 +19861,8 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v45, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v46, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v47, 16, v0
-; VI-NEXT: s_branch .LBB33_5
+; VI-NEXT: s_branch .LBB33_4
; VI-NEXT: .LBB33_3:
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: s_branch .LBB33_2
-; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -20470,7 +19919,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v30, s46
; VI-NEXT: v_mov_b32_e32 v29, s45
; VI-NEXT: v_mov_b32_e32 v28, s44
-; VI-NEXT: .LBB33_5: ; %end
+; VI-NEXT: .LBB33_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v47, 16, v47
; VI-NEXT: v_lshlrev_b32_e32 v46, 16, v46
; VI-NEXT: v_lshlrev_b32_e32 v45, 16, v45
@@ -20572,7 +20021,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s44, s6, 16
; GFX9-NEXT: s_lshr_b32 s45, s7, 16
@@ -20602,7 +20051,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: s_lshr_b32 s93, s18, 16
; GFX9-NEXT: s_lshr_b32 s94, s17, 16
; GFX9-NEXT: s_lshr_b32 s95, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
+; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v27, s6, 1.0
; GFX9-NEXT: v_add_f32_e64 v26, s7, 1.0
@@ -20660,38 +20109,8 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v47, 16, v0
-; GFX9-NEXT: s_branch .LBB33_5
+; GFX9-NEXT: s_branch .LBB33_4
; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: s_branch .LBB33_2
-; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -20748,7 +20167,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v30, s46
; GFX9-NEXT: v_mov_b32_e32 v29, s45
; GFX9-NEXT: v_mov_b32_e32 v28, s44
-; GFX9-NEXT: .LBB33_5: ; %end
+; GFX9-NEXT: .LBB33_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -20832,7 +20251,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s13, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s14, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s4, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s5, 16
@@ -20863,7 +20282,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s89, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s90, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f32_e64 v27, s4, 1.0
; GFX11-TRUE16-NEXT: v_add_f32_e64 v26, s5, 1.0
@@ -20921,38 +20340,8 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB33_5
+; GFX11-TRUE16-NEXT: s_branch .LBB33_4
; GFX11-TRUE16-NEXT: .LBB33_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr89
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr79
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB33_2
-; GFX11-TRUE16-NEXT: .LBB33_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -20981,7 +20370,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
-; GFX11-TRUE16-NEXT: .LBB33_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB33_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
@@ -21043,7 +20432,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s13, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s14, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s4, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s5, 16
@@ -21074,7 +20463,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s89, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s90, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f32_e64 v23, s4, 1.0
; GFX11-FAKE16-NEXT: v_add_f32_e64 v24, s5, 1.0
@@ -21132,38 +20521,8 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v2
-; GFX11-FAKE16-NEXT: s_branch .LBB33_5
+; GFX11-FAKE16-NEXT: s_branch .LBB33_4
; GFX11-FAKE16-NEXT: .LBB33_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr89
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr79
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB33_2
-; GFX11-FAKE16-NEXT: .LBB33_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v7, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s16 :: v_dual_mov_b32 v5, s17
@@ -21192,7 +20551,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
-; GFX11-FAKE16-NEXT: .LBB33_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB33_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v80, 0xffff, v0
@@ -22733,7 +22092,7 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; SI-NEXT: s_lshr_b32 s68, s34, 16
; SI-NEXT: v_readfirstlane_b32 s4, v14
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB35_3
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s87, 16
@@ -22819,7 +22178,7 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s63, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB35_4
+; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s87
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -23045,11 +22404,8 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v26, v27, v26
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v28
; SI-NEXT: v_or_b32_e32 v27, v29, v27
-; SI-NEXT: s_branch .LBB35_5
+; SI-NEXT: s_branch .LBB35_4
; SI-NEXT: .LBB35_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB35_2
-; SI-NEXT: .LBB35_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -23082,7 +22438,7 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB35_5: ; %end
+; SI-NEXT: .LBB35_4: ; %end
; SI-NEXT: v_readlane_b32 s87, v32, 31
; SI-NEXT: v_readlane_b32 s86, v32, 30
; SI-NEXT: v_readlane_b32 s85, v32, 29
@@ -23593,9 +22949,9 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s61, s61, s74
; GFX9-NEXT: s_pack_ll_b32_b16 s62, s62, s73
; GFX9-NEXT: s_pack_ll_b32_b16 s63, s63, s72
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
+; GFX9-NEXT: s_cbranch_execnz .LBB35_3
; GFX9-NEXT: .LBB35_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v27, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v27 op_sel_hi:[1,0]
@@ -23626,10 +22982,8 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v25, s61, v27 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v26, s62, v27 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v27, s63, v27 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB35_5
+; GFX9-NEXT: s_branch .LBB35_4
; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
-; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -23662,7 +23016,7 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB35_5: ; %end
+; GFX9-NEXT: .LBB35_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -23753,10 +23107,10 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s25, s58, s63
; GFX11-NEXT: s_pack_ll_b32_b16 s26, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s27, s44, s57
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-NEXT: .LBB35_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -23788,8 +23142,6 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v27, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
-; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -24033,7 +23385,7 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s42, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s43, v0
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB37_3
; SI-NEXT: .LBB37_2: ; %cmp.true
@@ -24095,8 +23447,6 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v26, s7
; SI-NEXT: v_mov_b32_e32 v27, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v14i64_to_v14f64_scalar:
; VI: ; %bb.0:
@@ -24117,7 +23467,7 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s42, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s43, v0
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB37_3
; VI-NEXT: .LBB37_2: ; %cmp.true
@@ -24179,8 +23529,6 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v26, s7
; VI-NEXT: v_mov_b32_e32 v27, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v14i64_to_v14f64_scalar:
; GFX9: ; %bb.0:
@@ -24201,7 +23549,7 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s42, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s43, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB37_3
; GFX9-NEXT: .LBB37_2: ; %cmp.true
@@ -24263,8 +23611,6 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v26, s7
; GFX9-NEXT: v_mov_b32_e32 v27, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v14i64_to_v14f64_scalar:
; GFX11: ; %bb.0:
@@ -24282,7 +23628,7 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s13, v0
; GFX11-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-NEXT: s_mov_b32 s14, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
@@ -24331,8 +23677,6 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v24, s7 :: v_dual_mov_b32 v25, s6
; GFX11-NEXT: v_dual_mov_b32 v26, s5 :: v_dual_mov_b32 v27, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_4:
-; GFX11-NEXT: s_branch .LBB37_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -24526,9 +23870,9 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB39_3
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_4
+; SI-NEXT: s_cbranch_execnz .LBB39_3
; SI-NEXT: .LBB39_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -24544,10 +23888,8 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; SI-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
; SI-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
; SI-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
-; SI-NEXT: s_branch .LBB39_5
+; SI-NEXT: s_branch .LBB39_4
; SI-NEXT: .LBB39_3:
-; SI-NEXT: s_branch .LBB39_2
-; SI-NEXT: .LBB39_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -24580,7 +23922,7 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB39_5: ; %end
+; SI-NEXT: .LBB39_4: ; %end
; SI-NEXT: v_readlane_b32 s55, v32, 11
; SI-NEXT: v_readlane_b32 s54, v32, 10
; SI-NEXT: v_readlane_b32 s53, v32, 9
@@ -24647,9 +23989,9 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB39_3
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_4
+; VI-NEXT: s_cbranch_execnz .LBB39_3
; VI-NEXT: .LBB39_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -24665,10 +24007,8 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; VI-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
; VI-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
; VI-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
-; VI-NEXT: s_branch .LBB39_5
+; VI-NEXT: s_branch .LBB39_4
; VI-NEXT: .LBB39_3:
-; VI-NEXT: s_branch .LBB39_2
-; VI-NEXT: .LBB39_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -24701,7 +24041,7 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB39_5: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: v_readlane_b32 s55, v32, 11
; VI-NEXT: v_readlane_b32 s54, v32, 10
; VI-NEXT: v_readlane_b32 s53, v32, 9
@@ -24768,9 +24108,9 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
+; GFX9-NEXT: s_cbranch_execnz .LBB39_3
; GFX9-NEXT: .LBB39_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -24786,10 +24126,8 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
; GFX9-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
; GFX9-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
-; GFX9-NEXT: s_branch .LBB39_5
+; GFX9-NEXT: s_branch .LBB39_4
; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
-; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -24822,7 +24160,7 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB39_5: ; %end
+; GFX9-NEXT: .LBB39_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -24890,10 +24228,10 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s54, v0
; GFX11-NEXT: v_writelane_b32 v32, s55, 11
; GFX11-NEXT: v_readfirstlane_b32 s55, v1
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_3
; GFX11-NEXT: .LBB39_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -24909,10 +24247,8 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[22:23], s[58:59], 1.0
; GFX11-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
; GFX11-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
-; GFX11-NEXT: s_branch .LBB39_5
+; GFX11-NEXT: s_branch .LBB39_4
; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
-; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -24929,7 +24265,7 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB39_5: ; %end
+; GFX11-NEXT: .LBB39_4: ; %end
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
; GFX11-NEXT: v_readlane_b32 s54, v32, 10
; GFX11-NEXT: v_readlane_b32 s53, v32, 9
@@ -25852,7 +25188,7 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; SI-NEXT: s_cmp_lg_u32 s40, 0
; SI-NEXT: v_readfirstlane_b32 s40, v0
; SI-NEXT: v_writelane_b32 v28, s53, 13
-; SI-NEXT: s_cbranch_scc0 .LBB41_4
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s30, s5, 16
; SI-NEXT: s_lshr_b32 s31, s7, 16
@@ -26072,36 +25408,6 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB41_4:
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: s_branch .LBB41_2
;
; VI-LABEL: bitcast_v14i64_to_v56i16_scalar:
; VI: ; %bb.0:
@@ -26129,7 +25435,7 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s43, v0
; VI-NEXT: v_writelane_b32 v28, s35, 3
-; VI-NEXT: s_cbranch_scc0 .LBB41_4
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s44, s6, 16
; VI-NEXT: s_lshr_b32 s45, s7, 16
@@ -26339,36 +25645,6 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_4:
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: s_branch .LBB41_2
;
; GFX9-LABEL: bitcast_v14i64_to_v56i16_scalar:
; GFX9: ; %bb.0:
@@ -26389,7 +25665,7 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s42, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s43, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s44, s6, 16
; GFX9-NEXT: s_lshr_b32 s45, s7, 16
@@ -26535,36 +25811,6 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v26, s7
; GFX9-NEXT: v_mov_b32_e32 v27, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_4:
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: s_branch .LBB41_2
;
; GFX11-LABEL: bitcast_v14i64_to_v56i16_scalar:
; GFX11: ; %bb.0:
@@ -26582,7 +25828,7 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s13, v0
; GFX11-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-NEXT: s_mov_b32 s90, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s14, s4, 16
; GFX11-NEXT: s_lshr_b32 s15, s5, 16
@@ -26716,36 +25962,6 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v24, s7 :: v_dual_mov_b32 v25, s6
; GFX11-NEXT: v_dual_mov_b32 v26, s5 :: v_dual_mov_b32 v27, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_4:
-; GFX11-NEXT: ; implicit-def: $sgpr89
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr79
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr77
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr75
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: s_branch .LBB41_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -28092,7 +27308,7 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s71, s80, 16
; SI-NEXT: v_readfirstlane_b32 s4, v14
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB43_4
+; SI-NEXT: s_cbranch_scc0 .LBB43_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s87, 16
@@ -28386,9 +27602,6 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB43_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB43_2
;
; VI-LABEL: bitcast_v56i16_to_v14i64_scalar:
; VI: ; %bb.0:
@@ -28949,9 +28162,9 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s61, s61, s74
; GFX9-NEXT: s_pack_ll_b32_b16 s62, s62, s73
; GFX9-NEXT: s_pack_ll_b32_b16 s63, s63, s72
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
+; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -28981,10 +28194,8 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v25, s61, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v26, s62, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v27, s63, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB43_5
+; GFX9-NEXT: s_branch .LBB43_4
; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
-; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -29017,7 +28228,7 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB43_5: ; %end
+; GFX9-NEXT: .LBB43_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -29108,10 +28319,10 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s25, s58, s63
; GFX11-NEXT: s_pack_ll_b32_b16 s26, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s27, s44, s57
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-NEXT: .LBB43_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -29143,8 +28354,6 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v27, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
-; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -30067,7 +29276,7 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; SI-NEXT: s_cmp_lg_u32 s40, 0
; SI-NEXT: v_readfirstlane_b32 s40, v0
; SI-NEXT: v_writelane_b32 v28, s53, 13
-; SI-NEXT: s_cbranch_scc0 .LBB45_4
+; SI-NEXT: s_cbranch_scc0 .LBB45_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s30, s5, 16
; SI-NEXT: s_lshr_b32 s31, s7, 16
@@ -30287,36 +29496,6 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB45_4:
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: s_branch .LBB45_2
;
; VI-LABEL: bitcast_v14i64_to_v56f16_scalar:
; VI: ; %bb.0:
@@ -30344,7 +29523,7 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s43, v0
; VI-NEXT: v_writelane_b32 v28, s35, 3
-; VI-NEXT: s_cbranch_scc0 .LBB45_4
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s44, s6, 16
; VI-NEXT: s_lshr_b32 s45, s7, 16
@@ -30554,36 +29733,6 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_4:
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: s_branch .LBB45_2
;
; GFX9-LABEL: bitcast_v14i64_to_v56f16_scalar:
; GFX9: ; %bb.0:
@@ -30604,7 +29753,7 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s42, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s43, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s44, s6, 16
; GFX9-NEXT: s_lshr_b32 s45, s7, 16
@@ -30750,36 +29899,6 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v26, s7
; GFX9-NEXT: v_mov_b32_e32 v27, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_4:
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: s_branch .LBB45_2
;
; GFX11-LABEL: bitcast_v14i64_to_v56f16_scalar:
; GFX11: ; %bb.0:
@@ -30797,7 +29916,7 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s13, v0
; GFX11-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-NEXT: s_mov_b32 s90, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s14, s4, 16
; GFX11-NEXT: s_lshr_b32 s15, s5, 16
@@ -30931,36 +30050,6 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v24, s7 :: v_dual_mov_b32 v25, s6
; GFX11-NEXT: v_dual_mov_b32 v26, s5 :: v_dual_mov_b32 v27, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_4:
-; GFX11-NEXT: ; implicit-def: $sgpr89
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr79
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr77
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr75
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: s_branch .LBB45_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -32444,7 +31533,7 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s68, s34, 16
; SI-NEXT: v_readfirstlane_b32 s4, v14
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB47_3
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s87, 16
@@ -32530,7 +31619,7 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s63, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB47_4
+; SI-NEXT: s_cbranch_execnz .LBB47_3
; SI-NEXT: .LBB47_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s87
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -32756,11 +31845,8 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v26, v27, v26
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v28
; SI-NEXT: v_or_b32_e32 v27, v29, v27
-; SI-NEXT: s_branch .LBB47_5
+; SI-NEXT: s_branch .LBB47_4
; SI-NEXT: .LBB47_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB47_2
-; SI-NEXT: .LBB47_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -32793,7 +31879,7 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB47_5: ; %end
+; SI-NEXT: .LBB47_4: ; %end
; SI-NEXT: v_readlane_b32 s87, v32, 31
; SI-NEXT: v_readlane_b32 s86, v32, 30
; SI-NEXT: v_readlane_b32 s85, v32, 29
@@ -33304,9 +32390,9 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s61, s61, s74
; GFX9-NEXT: s_pack_ll_b32_b16 s62, s62, s73
; GFX9-NEXT: s_pack_ll_b32_b16 s63, s63, s72
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
+; GFX9-NEXT: s_cbranch_execnz .LBB47_3
; GFX9-NEXT: .LBB47_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v27, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v27 op_sel_hi:[1,0]
@@ -33337,10 +32423,8 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v25, s61, v27 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v26, s62, v27 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v27, s63, v27 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB47_5
+; GFX9-NEXT: s_branch .LBB47_4
; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
-; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -33373,7 +32457,7 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB47_5: ; %end
+; GFX9-NEXT: .LBB47_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -33464,10 +32548,10 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s25, s58, s63
; GFX11-NEXT: s_pack_ll_b32_b16 s26, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s27, s44, s57
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-NEXT: .LBB47_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -33499,8 +32583,6 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v27, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -34349,7 +33431,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: v_writelane_b32 v58, s53, 13
-; SI-NEXT: s_cbranch_scc0 .LBB49_3
+; SI-NEXT: s_cbranch_scc0 .LBB49_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s53, s41, 16
; SI-NEXT: s_lshr_b32 s52, s15, 16
@@ -34379,7 +33461,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[92:93], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB49_4
+; SI-NEXT: s_cbranch_execnz .LBB49_3
; SI-NEXT: .LBB49_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[26:27], s[40:41], 1.0
; SI-NEXT: v_add_f64 v[24:25], s[14:15], 1.0
@@ -34430,38 +33512,8 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v1
-; SI-NEXT: s_branch .LBB49_5
+; SI-NEXT: s_branch .LBB49_4
; SI-NEXT: .LBB49_3:
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: s_branch .LBB49_2
-; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -34525,7 +33577,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v30, s46
; SI-NEXT: v_mov_b32_e32 v29, s44
; SI-NEXT: v_mov_b32_e32 v28, s42
-; SI-NEXT: .LBB49_5: ; %end
+; SI-NEXT: .LBB49_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v50
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v0, v39
@@ -34674,7 +33726,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: v_writelane_b32 v56, s35, 3
-; VI-NEXT: s_cbranch_scc0 .LBB49_3
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s44, s9, 16
; VI-NEXT: s_lshr_b32 s74, s8, 16
@@ -34704,7 +33756,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; VI-NEXT: s_lshr_b32 s34, s18, 16
; VI-NEXT: s_lshr_b32 s73, s17, 16
; VI-NEXT: s_lshr_b32 s35, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB49_4
+; VI-NEXT: s_cbranch_execnz .LBB49_3
; VI-NEXT: .LBB49_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[26:27], s[8:9], 1.0
; VI-NEXT: v_add_f64 v[24:25], s[10:11], 1.0
@@ -34748,38 +33800,8 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v42, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v47, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v44, 16, v0
-; VI-NEXT: s_branch .LBB49_5
+; VI-NEXT: s_branch .LBB49_4
; VI-NEXT: .LBB49_3:
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: s_branch .LBB49_2
-; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -34836,7 +33858,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v35, s46
; VI-NEXT: v_mov_b32_e32 v33, s45
; VI-NEXT: v_mov_b32_e32 v31, s44
-; VI-NEXT: .LBB49_5: ; %end
+; VI-NEXT: .LBB49_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v44, 16, v44
; VI-NEXT: v_lshlrev_b32_e32 v42, 16, v42
; VI-NEXT: v_lshlrev_b32_e32 v40, 16, v40
@@ -34938,7 +33960,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s44, s9, 16
; GFX9-NEXT: s_lshr_b32 s74, s8, 16
@@ -34968,7 +33990,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX9-NEXT: s_lshr_b32 s94, s18, 16
; GFX9-NEXT: s_lshr_b32 s73, s17, 16
; GFX9-NEXT: s_lshr_b32 s95, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
+; GFX9-NEXT: s_cbranch_execnz .LBB49_3
; GFX9-NEXT: .LBB49_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[26:27], s[8:9], 1.0
; GFX9-NEXT: v_add_f64 v[24:25], s[10:11], 1.0
@@ -35012,38 +34034,8 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v47, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v0
-; GFX9-NEXT: s_branch .LBB49_5
+; GFX9-NEXT: s_branch .LBB49_4
; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: s_branch .LBB49_2
-; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -35100,7 +34092,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v35, s46
; GFX9-NEXT: v_mov_b32_e32 v33, s45
; GFX9-NEXT: v_mov_b32_e32 v31, s44
-; GFX9-NEXT: .LBB49_5: ; %end
+; GFX9-NEXT: .LBB49_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -35184,7 +34176,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s12, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s14, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s11, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s61, s10, 16
@@ -35215,7 +34207,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s60, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s90, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f64 v[26:27], s[10:11], 1.0
; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], s[8:9], 1.0
@@ -35259,38 +34251,8 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB49_5
+; GFX11-TRUE16-NEXT: s_branch .LBB49_4
; GFX11-TRUE16-NEXT: .LBB49_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr89
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr79
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB49_2
-; GFX11-TRUE16-NEXT: .LBB49_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -35319,7 +34281,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, s44 :: v_dual_mov_b32 v37, s43
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s42 :: v_dual_mov_b32 v33, s41
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s40 :: v_dual_mov_b32 v30, s15
-; GFX11-TRUE16-NEXT: .LBB49_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB49_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v50, v50
@@ -35382,7 +34344,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s12, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s14, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s11, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s61, s10, 16
@@ -35413,7 +34375,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s60, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s90, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f64 v[23:24], s[10:11], 1.0
; GFX11-FAKE16-NEXT: v_add_f64 v[25:26], s[8:9], 1.0
@@ -35457,38 +34419,8 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v31
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v1
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-FAKE16-NEXT: s_branch .LBB49_5
+; GFX11-FAKE16-NEXT: s_branch .LBB49_4
; GFX11-FAKE16-NEXT: .LBB49_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr89
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr79
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB49_2
-; GFX11-FAKE16-NEXT: .LBB49_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v31, s2
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v10, s20
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s18 :: v_dual_mov_b32 v8, s22
@@ -35517,7 +34449,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v51, s44 :: v_dual_mov_b32 v48, s42
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v49, s43 :: v_dual_mov_b32 v38, s41
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s40 :: v_dual_mov_b32 v35, s15
-; GFX11-FAKE16-NEXT: .LBB49_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB49_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -36921,7 +35853,7 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; SI-NEXT: s_lshr_b32 s71, s80, 16
; SI-NEXT: v_readfirstlane_b32 s4, v14
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB51_4
+; SI-NEXT: s_cbranch_scc0 .LBB51_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s87, 16
@@ -37215,9 +36147,6 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB51_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB51_2
;
; VI-LABEL: bitcast_v56i16_to_v14f64_scalar:
; VI: ; %bb.0:
@@ -37778,9 +36707,9 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s61, s61, s74
; GFX9-NEXT: s_pack_ll_b32_b16 s62, s62, s73
; GFX9-NEXT: s_pack_ll_b32_b16 s63, s63, s72
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_4
+; GFX9-NEXT: s_cbranch_execnz .LBB51_3
; GFX9-NEXT: .LBB51_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -37810,10 +36739,8 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v25, s61, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v26, s62, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v27, s63, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB51_5
+; GFX9-NEXT: s_branch .LBB51_4
; GFX9-NEXT: .LBB51_3:
-; GFX9-NEXT: s_branch .LBB51_2
-; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -37846,7 +36773,7 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB51_5: ; %end
+; GFX9-NEXT: .LBB51_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -37937,10 +36864,10 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s25, s58, s63
; GFX11-NEXT: s_pack_ll_b32_b16 s26, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s27, s44, s57
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-NEXT: .LBB51_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -37972,8 +36899,6 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v27, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
-; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -38822,7 +37747,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: v_writelane_b32 v58, s53, 13
-; SI-NEXT: s_cbranch_scc0 .LBB53_3
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s53, s41, 16
; SI-NEXT: s_lshr_b32 s52, s15, 16
@@ -38852,7 +37777,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[92:93], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB53_4
+; SI-NEXT: s_cbranch_execnz .LBB53_3
; SI-NEXT: .LBB53_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[26:27], s[40:41], 1.0
; SI-NEXT: v_add_f64 v[24:25], s[14:15], 1.0
@@ -38903,38 +37828,8 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v1
-; SI-NEXT: s_branch .LBB53_5
+; SI-NEXT: s_branch .LBB53_4
; SI-NEXT: .LBB53_3:
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr31
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: s_branch .LBB53_2
-; SI-NEXT: .LBB53_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -38998,7 +37893,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; SI-NEXT: v_mov_b32_e32 v30, s46
; SI-NEXT: v_mov_b32_e32 v29, s44
; SI-NEXT: v_mov_b32_e32 v28, s42
-; SI-NEXT: .LBB53_5: ; %end
+; SI-NEXT: .LBB53_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v50
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v0, v39
@@ -39147,7 +38042,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: v_writelane_b32 v56, s35, 3
-; VI-NEXT: s_cbranch_scc0 .LBB53_3
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s44, s9, 16
; VI-NEXT: s_lshr_b32 s74, s8, 16
@@ -39177,7 +38072,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; VI-NEXT: s_lshr_b32 s34, s18, 16
; VI-NEXT: s_lshr_b32 s73, s17, 16
; VI-NEXT: s_lshr_b32 s35, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB53_4
+; VI-NEXT: s_cbranch_execnz .LBB53_3
; VI-NEXT: .LBB53_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[26:27], s[8:9], 1.0
; VI-NEXT: v_add_f64 v[24:25], s[10:11], 1.0
@@ -39221,38 +38116,8 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; VI-NEXT: v_lshrrev_b32_e32 v42, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v47, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v44, 16, v0
-; VI-NEXT: s_branch .LBB53_5
+; VI-NEXT: s_branch .LBB53_4
; VI-NEXT: .LBB53_3:
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr45
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: s_branch .LBB53_2
-; VI-NEXT: .LBB53_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -39309,7 +38174,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; VI-NEXT: v_mov_b32_e32 v35, s46
; VI-NEXT: v_mov_b32_e32 v33, s45
; VI-NEXT: v_mov_b32_e32 v31, s44
-; VI-NEXT: .LBB53_5: ; %end
+; VI-NEXT: .LBB53_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v44, 16, v44
; VI-NEXT: v_lshlrev_b32_e32 v42, 16, v42
; VI-NEXT: v_lshlrev_b32_e32 v40, 16, v40
@@ -39411,7 +38276,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s44, s9, 16
; GFX9-NEXT: s_lshr_b32 s74, s8, 16
@@ -39441,7 +38306,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX9-NEXT: s_lshr_b32 s94, s18, 16
; GFX9-NEXT: s_lshr_b32 s73, s17, 16
; GFX9-NEXT: s_lshr_b32 s95, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB53_4
+; GFX9-NEXT: s_cbranch_execnz .LBB53_3
; GFX9-NEXT: .LBB53_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[26:27], s[8:9], 1.0
; GFX9-NEXT: v_add_f64 v[24:25], s[10:11], 1.0
@@ -39485,38 +38350,8 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v47, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v0
-; GFX9-NEXT: s_branch .LBB53_5
+; GFX9-NEXT: s_branch .LBB53_4
; GFX9-NEXT: .LBB53_3:
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr45
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: s_branch .LBB53_2
-; GFX9-NEXT: .LBB53_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -39573,7 +38408,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v35, s46
; GFX9-NEXT: v_mov_b32_e32 v33, s45
; GFX9-NEXT: v_mov_b32_e32 v31, s44
-; GFX9-NEXT: .LBB53_5: ; %end
+; GFX9-NEXT: .LBB53_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -39657,7 +38492,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s12, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s14, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s11, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s61, s10, 16
@@ -39688,7 +38523,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX11-TRUE16-NEXT: s_lshr_b32 s60, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s90, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f64 v[26:27], s[10:11], 1.0
; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], s[8:9], 1.0
@@ -39732,38 +38567,8 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB53_5
+; GFX11-TRUE16-NEXT: s_branch .LBB53_4
; GFX11-TRUE16-NEXT: .LBB53_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr89
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr79
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB53_2
-; GFX11-TRUE16-NEXT: .LBB53_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -39792,7 +38597,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, s44 :: v_dual_mov_b32 v37, s43
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s42 :: v_dual_mov_b32 v33, s41
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s40 :: v_dual_mov_b32 v30, s15
-; GFX11-TRUE16-NEXT: .LBB53_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB53_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v50, v50
@@ -39855,7 +38660,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s12, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s14, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s11, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s61, s10, 16
@@ -39886,7 +38691,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX11-FAKE16-NEXT: s_lshr_b32 s60, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s90, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f64 v[23:24], s[10:11], 1.0
; GFX11-FAKE16-NEXT: v_add_f64 v[25:26], s[8:9], 1.0
@@ -39930,38 +38735,8 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v31
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v1
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-FAKE16-NEXT: s_branch .LBB53_5
+; GFX11-FAKE16-NEXT: s_branch .LBB53_4
; GFX11-FAKE16-NEXT: .LBB53_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr89
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr79
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB53_2
-; GFX11-FAKE16-NEXT: .LBB53_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v31, s2
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v10, s20
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s18 :: v_dual_mov_b32 v8, s22
@@ -39990,7 +38765,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v51, s44 :: v_dual_mov_b32 v48, s42
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v49, s43 :: v_dual_mov_b32 v38, s41
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s40 :: v_dual_mov_b32 v35, s15
-; GFX11-FAKE16-NEXT: .LBB53_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB53_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -41531,7 +40306,7 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; SI-NEXT: s_lshr_b32 s68, s34, 16
; SI-NEXT: v_readfirstlane_b32 s4, v14
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s87, 16
@@ -41617,7 +40392,7 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s63, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB55_4
+; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s87
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -41843,11 +40618,8 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; SI-NEXT: v_or_b32_e32 v26, v27, v26
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v28
; SI-NEXT: v_or_b32_e32 v27, v29, v27
-; SI-NEXT: s_branch .LBB55_5
+; SI-NEXT: s_branch .LBB55_4
; SI-NEXT: .LBB55_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB55_2
-; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -41880,7 +40652,7 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB55_5: ; %end
+; SI-NEXT: .LBB55_4: ; %end
; SI-NEXT: v_readlane_b32 s87, v32, 31
; SI-NEXT: v_readlane_b32 s86, v32, 30
; SI-NEXT: v_readlane_b32 s85, v32, 29
@@ -42391,9 +41163,9 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX9-NEXT: s_pack_ll_b32_b16 s61, s61, s74
; GFX9-NEXT: s_pack_ll_b32_b16 s62, s62, s73
; GFX9-NEXT: s_pack_ll_b32_b16 s63, s63, s72
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
+; GFX9-NEXT: s_cbranch_execnz .LBB55_3
; GFX9-NEXT: .LBB55_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v27, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v27 op_sel_hi:[1,0]
@@ -42424,10 +41196,8 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX9-NEXT: v_pk_add_f16 v25, s61, v27 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v26, s62, v27 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v27, s63, v27 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB55_5
+; GFX9-NEXT: s_branch .LBB55_4
; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
-; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -42460,7 +41230,7 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB55_5: ; %end
+; GFX9-NEXT: .LBB55_4: ; %end
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
; GFX9-NEXT: v_readlane_b32 s54, v32, 10
; GFX9-NEXT: v_readlane_b32 s53, v32, 9
@@ -42551,10 +41321,10 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-NEXT: s_pack_ll_b32_b16 s25, s58, s63
; GFX11-NEXT: s_pack_ll_b32_b16 s26, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s27, s44, s57
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-NEXT: .LBB55_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -42586,8 +41356,6 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-NEXT: v_pk_add_f16 v27, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
-; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -44663,7 +43431,7 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; VI-NEXT: s_lshr_b32 s91, s90, 16
; VI-NEXT: v_readfirstlane_b32 s4, v14
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB57_3
; VI-NEXT: .LBB57_2: ; %cmp.true
@@ -44845,8 +43613,6 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v56i16_to_v56f16_scalar:
; GFX9: ; %bb.0:
@@ -44903,9 +43669,9 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
+; GFX9-NEXT: s_cbranch_execnz .LBB57_3
; GFX9-NEXT: .LBB57_2: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s95, s73
; GFX9-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
@@ -44991,10 +43757,8 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v25
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v26
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX9-NEXT: s_branch .LBB57_5
+; GFX9-NEXT: s_branch .LBB57_4
; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
-; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v27, s95
; GFX9-NEXT: v_mov_b32_e32 v26, s94
; GFX9-NEXT: v_mov_b32_e32 v25, s93
@@ -45051,7 +43815,7 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v45, s8
; GFX9-NEXT: v_mov_b32_e32 v46, s7
; GFX9-NEXT: v_mov_b32_e32 v47, s6
-; GFX9-NEXT: .LBB57_5: ; %end
+; GFX9-NEXT: .LBB57_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -45163,10 +43927,10 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s72, 16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s90, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s90, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s90
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s59, s79, s59
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s57, s76, s57
@@ -45252,10 +44016,8 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX11-TRUE16-NEXT: s_branch .LBB57_5
+; GFX11-TRUE16-NEXT: s_branch .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB57_2
-; GFX11-TRUE16-NEXT: .LBB57_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, s79 :: v_dual_mov_b32 v26, s89
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, s88 :: v_dual_mov_b32 v24, s78
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, s76 :: v_dual_mov_b32 v22, s74
@@ -45284,7 +44046,7 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v66, s9 :: v_dual_mov_b32 v67, s6
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v68, s8 :: v_dual_mov_b32 v69, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v70, s4 :: v_dual_mov_b32 v71, s5
-; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB57_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
@@ -45374,10 +44136,10 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s72, 16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s90, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s90, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s90
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-FAKE16-NEXT: .LBB57_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s59, s79, s59
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s57, s76, s57
@@ -45463,10 +44225,8 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v24
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
-; GFX11-FAKE16-NEXT: s_branch .LBB57_5
+; GFX11-FAKE16-NEXT: s_branch .LBB57_4
; GFX11-FAKE16-NEXT: .LBB57_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB57_2
-; GFX11-FAKE16-NEXT: .LBB57_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s79 :: v_dual_mov_b32 v24, s89
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s88 :: v_dual_mov_b32 v26, s78
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s76 :: v_dual_mov_b32 v18, s74
@@ -45495,7 +44255,7 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v66, s9 :: v_dual_mov_b32 v67, s6
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v68, s8 :: v_dual_mov_b32 v69, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v70, s4 :: v_dual_mov_b32 v71, s5
-; GFX11-FAKE16-NEXT: .LBB57_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB57_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v80, 0xffff, v0
@@ -46592,9 +45352,9 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
-; SI-NEXT: s_cbranch_scc0 .LBB59_3
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_4
+; SI-NEXT: s_cbranch_execnz .LBB59_3
; SI-NEXT: .LBB59_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s95
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -46855,10 +45615,8 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v31, v29
; SI-NEXT: v_lshr_b64 v[28:29], v[26:27], 16
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
-; SI-NEXT: s_branch .LBB59_5
+; SI-NEXT: s_branch .LBB59_4
; SI-NEXT: .LBB59_3:
-; SI-NEXT: s_branch .LBB59_2
-; SI-NEXT: .LBB59_4:
; SI-NEXT: v_mov_b32_e32 v52, s72
; SI-NEXT: v_mov_b32_e32 v31, s61
; SI-NEXT: s_waitcnt expcnt(6)
@@ -46919,7 +45677,7 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v32, s59
; SI-NEXT: v_mov_b32_e32 v30, s57
; SI-NEXT: v_mov_b32_e32 v28, s46
-; SI-NEXT: .LBB59_5: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v46
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; SI-NEXT: v_or_b32_e32 v0, v2, v0
@@ -47088,9 +45846,9 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
+; VI-NEXT: s_cbranch_execnz .LBB59_3
; VI-NEXT: .LBB59_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v28, 0x200
; VI-NEXT: v_add_f16_e32 v0, s16, v28
@@ -47149,10 +45907,8 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v29, s47, v28
; VI-NEXT: v_add_f16_e32 v27, s44, v28
; VI-NEXT: v_add_f16_e32 v28, s45, v28
-; VI-NEXT: s_branch .LBB59_5
+; VI-NEXT: s_branch .LBB59_4
; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
-; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v28, s45
; VI-NEXT: v_mov_b32_e32 v27, s44
; VI-NEXT: v_mov_b32_e32 v29, s47
@@ -47209,7 +45965,7 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v47, s43
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: .LBB59_5: ; %end
+; VI-NEXT: .LBB59_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v47, 16, v47
; VI-NEXT: v_lshlrev_b32_e32 v46, 16, v46
; VI-NEXT: v_lshlrev_b32_e32 v45, 16, v45
@@ -47339,9 +46095,9 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
+; GFX9-NEXT: s_cbranch_execnz .LBB59_3
; GFX9-NEXT: .LBB59_2: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s95, s73
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
@@ -47428,10 +46184,8 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v25
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v26
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX9-NEXT: s_branch .LBB59_5
+; GFX9-NEXT: s_branch .LBB59_4
; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
-; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v27, s95
; GFX9-NEXT: v_mov_b32_e32 v26, s94
; GFX9-NEXT: v_mov_b32_e32 v25, s93
@@ -47488,7 +46242,7 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v45, s8
; GFX9-NEXT: v_mov_b32_e32 v46, s7
; GFX9-NEXT: v_mov_b32_e32 v47, s6
-; GFX9-NEXT: .LBB59_5: ; %end
+; GFX9-NEXT: .LBB59_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -47600,10 +46354,10 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s72, 16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s90, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s90, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s90
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s59, s79, s59
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s57, s76, s57
@@ -47689,10 +46443,8 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX11-TRUE16-NEXT: s_branch .LBB59_5
+; GFX11-TRUE16-NEXT: s_branch .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB59_2
-; GFX11-TRUE16-NEXT: .LBB59_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, s79 :: v_dual_mov_b32 v26, s89
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, s88 :: v_dual_mov_b32 v24, s78
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, s76 :: v_dual_mov_b32 v22, s74
@@ -47721,7 +46473,7 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v66, s9 :: v_dual_mov_b32 v67, s6
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v68, s8 :: v_dual_mov_b32 v69, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v70, s4 :: v_dual_mov_b32 v71, s5
-; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB59_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
@@ -47811,10 +46563,10 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s72, 16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s90, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s90, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s90
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-FAKE16-NEXT: .LBB59_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s59, s79, s59
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s57, s76, s57
@@ -47900,10 +46652,8 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v24
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
-; GFX11-FAKE16-NEXT: s_branch .LBB59_5
+; GFX11-FAKE16-NEXT: s_branch .LBB59_4
; GFX11-FAKE16-NEXT: .LBB59_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB59_2
-; GFX11-FAKE16-NEXT: .LBB59_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s79 :: v_dual_mov_b32 v24, s89
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s88 :: v_dual_mov_b32 v26, s78
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s76 :: v_dual_mov_b32 v18, s74
@@ -47932,7 +46682,7 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v66, s9 :: v_dual_mov_b32 v67, s6
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v68, s8 :: v_dual_mov_b32 v69, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v70, s4 :: v_dual_mov_b32 v71, s5
-; GFX11-FAKE16-NEXT: .LBB59_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB59_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v80, 0xffff, v0
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
index ab6653a3d5e37..a8f6457f26518 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
@@ -218,7 +218,7 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s44, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s45, v0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -284,8 +284,6 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v28, s7
; SI-NEXT: v_mov_b32_e32 v29, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v30i32_to_v30f32_scalar:
; VI: ; %bb.0:
@@ -308,7 +306,7 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s44, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s45, v0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -374,8 +372,6 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v28, s7
; VI-NEXT: v_mov_b32_e32 v29, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v30i32_to_v30f32_scalar:
; GFX9: ; %bb.0:
@@ -398,7 +394,7 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s44, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s45, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -464,8 +460,6 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v28, s7
; GFX9-NEXT: v_mov_b32_e32 v29, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v30i32_to_v30f32_scalar:
; GFX11: ; %bb.0:
@@ -485,7 +479,7 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s15, v0
; GFX11-NEXT: s_cmp_lg_u32 s40, 0
; GFX11-NEXT: s_mov_b32 s40, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -538,8 +532,6 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s7 :: v_dual_mov_b32 v27, s6
; GFX11-NEXT: v_dual_mov_b32 v28, s5 :: v_dual_mov_b32 v29, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -786,9 +778,9 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v29, s65, 1.0
; SI-NEXT: v_add_f32_e64 v28, s64, 1.0
@@ -820,10 +812,8 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB3_5
+; SI-NEXT: s_branch .LBB3_4
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -856,7 +846,7 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB3_5: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: v_readlane_b32 s65, v32, 13
; SI-NEXT: v_readlane_b32 s64, v32, 12
; SI-NEXT: v_readlane_b32 s55, v32, 11
@@ -929,9 +919,9 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v29, s65, 1.0
; VI-NEXT: v_add_f32_e64 v28, s64, 1.0
@@ -963,10 +953,8 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB3_5
+; VI-NEXT: s_branch .LBB3_4
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -999,7 +987,7 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB3_5: ; %end
+; VI-NEXT: .LBB3_4: ; %end
; VI-NEXT: v_readlane_b32 s65, v32, 13
; VI-NEXT: v_readlane_b32 s64, v32, 12
; VI-NEXT: v_readlane_b32 s55, v32, 11
@@ -1072,9 +1060,9 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v29, s65, 1.0
; GFX9-NEXT: v_add_f32_e64 v28, s64, 1.0
@@ -1106,10 +1094,8 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB3_5
+; GFX9-NEXT: s_branch .LBB3_4
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -1142,7 +1128,7 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB3_5: ; %end
+; GFX9-NEXT: .LBB3_4: ; %end
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
; GFX9-NEXT: v_readlane_b32 s64, v32, 12
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
@@ -1216,10 +1202,10 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s64, v10
; GFX11-NEXT: v_writelane_b32 v32, s65, 13
; GFX11-NEXT: v_readfirstlane_b32 s65, v11
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v29, s65, 1.0
; GFX11-NEXT: v_add_f32_e64 v28, s64, 1.0
@@ -1251,10 +1237,8 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB3_5
+; GFX11-NEXT: s_branch .LBB3_4
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -1271,7 +1255,7 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB3_5: ; %end
+; GFX11-NEXT: .LBB3_4: ; %end
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
; GFX11-NEXT: v_readlane_b32 s64, v32, 12
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
@@ -1520,7 +1504,7 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s44, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s45, v0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB5_3
; SI-NEXT: .LBB5_2: ; %cmp.true
@@ -1586,8 +1570,6 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v28, s7
; SI-NEXT: v_mov_b32_e32 v29, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v30i32_to_v15i64_scalar:
; VI: ; %bb.0:
@@ -1610,7 +1592,7 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s44, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s45, v0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB5_3
; VI-NEXT: .LBB5_2: ; %cmp.true
@@ -1676,8 +1658,6 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v28, s7
; VI-NEXT: v_mov_b32_e32 v29, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v30i32_to_v15i64_scalar:
; GFX9: ; %bb.0:
@@ -1700,7 +1680,7 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s44, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s45, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB5_3
; GFX9-NEXT: .LBB5_2: ; %cmp.true
@@ -1766,8 +1746,6 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v28, s7
; GFX9-NEXT: v_mov_b32_e32 v29, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v30i32_to_v15i64_scalar:
; GFX11: ; %bb.0:
@@ -1787,7 +1765,7 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s15, v0
; GFX11-NEXT: s_cmp_lg_u32 s40, 0
; GFX11-NEXT: s_mov_b32 s40, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
@@ -1840,8 +1818,6 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v26, s7 :: v_dual_mov_b32 v27, s6
; GFX11-NEXT: v_dual_mov_b32 v28, s5 :: v_dual_mov_b32 v29, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2079,7 +2055,7 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s44, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s45, v0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB7_3
; SI-NEXT: .LBB7_2: ; %cmp.true
@@ -2145,8 +2121,6 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v28, s7
; SI-NEXT: v_mov_b32_e32 v29, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v15i64_to_v30i32_scalar:
; VI: ; %bb.0:
@@ -2169,7 +2143,7 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s44, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s45, v0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB7_3
; VI-NEXT: .LBB7_2: ; %cmp.true
@@ -2235,8 +2209,6 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v28, s7
; VI-NEXT: v_mov_b32_e32 v29, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v15i64_to_v30i32_scalar:
; GFX9: ; %bb.0:
@@ -2259,7 +2231,7 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s44, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s45, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB7_3
; GFX9-NEXT: .LBB7_2: ; %cmp.true
@@ -2325,8 +2297,6 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v28, s7
; GFX9-NEXT: v_mov_b32_e32 v29, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v15i64_to_v30i32_scalar:
; GFX11: ; %bb.0:
@@ -2346,7 +2316,7 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s15, v0
; GFX11-NEXT: s_cmp_lg_u32 s40, 0
; GFX11-NEXT: s_mov_b32 s40, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
@@ -2399,8 +2369,6 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v26, s7 :: v_dual_mov_b32 v27, s6
; GFX11-NEXT: v_dual_mov_b32 v28, s5 :: v_dual_mov_b32 v29, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2630,7 +2598,7 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s44, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s45, v0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB9_3
; SI-NEXT: .LBB9_2: ; %cmp.true
@@ -2696,8 +2664,6 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v28, s7
; SI-NEXT: v_mov_b32_e32 v29, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v30i32_to_v15f64_scalar:
; VI: ; %bb.0:
@@ -2720,7 +2686,7 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s44, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s45, v0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -2786,8 +2752,6 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v28, s7
; VI-NEXT: v_mov_b32_e32 v29, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v30i32_to_v15f64_scalar:
; GFX9: ; %bb.0:
@@ -2810,7 +2774,7 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s44, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s45, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -2876,8 +2840,6 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v28, s7
; GFX9-NEXT: v_mov_b32_e32 v29, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v30i32_to_v15f64_scalar:
; GFX11: ; %bb.0:
@@ -2897,7 +2859,7 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s15, v0
; GFX11-NEXT: s_cmp_lg_u32 s40, 0
; GFX11-NEXT: s_mov_b32 s40, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -2950,8 +2912,6 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s7 :: v_dual_mov_b32 v27, s6
; GFX11-NEXT: v_dual_mov_b32 v28, s5 :: v_dual_mov_b32 v29, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3153,9 +3113,9 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
+; SI-NEXT: s_cbranch_execnz .LBB11_3
; SI-NEXT: .LBB11_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
; SI-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
@@ -3172,10 +3132,8 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; SI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; SI-NEXT: s_branch .LBB11_5
+; SI-NEXT: s_branch .LBB11_4
; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
-; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -3208,7 +3166,7 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB11_5: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: v_readlane_b32 s65, v32, 13
; SI-NEXT: v_readlane_b32 s64, v32, 12
; SI-NEXT: v_readlane_b32 s55, v32, 11
@@ -3281,9 +3239,9 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
; VI-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
@@ -3300,10 +3258,8 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB11_5
+; VI-NEXT: s_branch .LBB11_4
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -3336,7 +3292,7 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB11_5: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: v_readlane_b32 s65, v32, 13
; VI-NEXT: v_readlane_b32 s64, v32, 12
; VI-NEXT: v_readlane_b32 s55, v32, 11
@@ -3409,9 +3365,9 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
; GFX9-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
@@ -3428,10 +3384,8 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB11_5
+; GFX9-NEXT: s_branch .LBB11_4
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -3464,7 +3418,7 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB11_5: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
; GFX9-NEXT: v_readlane_b32 s64, v32, 12
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
@@ -3538,10 +3492,10 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s64, v10
; GFX11-NEXT: v_writelane_b32 v32, s65, 13
; GFX11-NEXT: v_readfirstlane_b32 s65, v11
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-NEXT: .LBB11_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
; GFX11-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
@@ -3558,10 +3512,8 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB11_5
+; GFX11-NEXT: s_branch .LBB11_4
; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -3578,7 +3530,7 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB11_5: ; %end
+; GFX11-NEXT: .LBB11_4: ; %end
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
; GFX11-NEXT: v_readlane_b32 s64, v32, 12
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
@@ -4566,7 +4518,7 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; SI-NEXT: s_cmp_lg_u32 s42, 0
; SI-NEXT: v_readfirstlane_b32 s42, v0
; SI-NEXT: v_writelane_b32 v30, s64, 16
-; SI-NEXT: s_cbranch_scc0 .LBB13_4
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s34, s5, 16
; SI-NEXT: s_lshr_b32 s35, s7, 16
@@ -4803,38 +4755,6 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB13_4:
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: s_branch .LBB13_2
;
; VI-LABEL: bitcast_v30i32_to_v60i16_scalar:
; VI: ; %bb.0:
@@ -4868,7 +4788,7 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s45, v0
; VI-NEXT: v_writelane_b32 v30, s39, 7
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s46, s6, 16
; VI-NEXT: s_lshr_b32 s47, s7, 16
@@ -5096,38 +5016,6 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: ; implicit-def: $sgpr39
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr37
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v30i32_to_v60i16_scalar:
; GFX9: ; %bb.0:
@@ -5157,7 +5045,7 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s45, v0
; GFX9-NEXT: v_writelane_b32 v30, s35, 3
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s46, s6, 16
; GFX9-NEXT: s_lshr_b32 s47, s7, 16
@@ -5321,38 +5209,6 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: ; implicit-def: $sgpr35
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr31
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v30i32_to_v60i16_scalar:
; GFX11: ; %bb.0:
@@ -5372,7 +5228,7 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s15, v0
; GFX11-NEXT: s_cmp_lg_u32 s40, 0
; GFX11-NEXT: s_mov_b32 s94, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s40, s4, 16
; GFX11-NEXT: s_lshr_b32 s41, s5, 16
@@ -5515,38 +5371,6 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v26, s7 :: v_dual_mov_b32 v27, s6
; GFX11-NEXT: v_dual_mov_b32 v28, s5 :: v_dual_mov_b32 v29, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: ; implicit-def: $sgpr93
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr91
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr89
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr79
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr77
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr75
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7001,7 +6825,7 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s83, s84, 16
; SI-NEXT: v_readfirstlane_b32 s4, v16
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB15_4
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s99, 16
@@ -7317,9 +7141,6 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB15_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB15_2
;
; VI-LABEL: bitcast_v60i16_to_v30i32_scalar:
; VI: ; %bb.0:
@@ -7840,9 +7661,9 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s63, s63, s76
; GFX9-NEXT: s_pack_ll_b32_b16 s64, s74, s75
; GFX9-NEXT: s_pack_ll_b32_b16 s65, s72, s73
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -7874,10 +7695,8 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v27, s63, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v28, s64, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v29, s65, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB15_5
+; GFX9-NEXT: s_branch .LBB15_4
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -7910,7 +7729,7 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB15_5: ; %end
+; GFX9-NEXT: .LBB15_4: ; %end
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
; GFX9-NEXT: v_readlane_b32 s64, v32, 12
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
@@ -8009,10 +7828,10 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s27, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s28, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s29, s45, s58
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -8046,8 +7865,6 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v29, s29, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -9033,7 +8850,7 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; SI-NEXT: s_cmp_lg_u32 s42, 0
; SI-NEXT: v_readfirstlane_b32 s42, v0
; SI-NEXT: v_writelane_b32 v30, s64, 16
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s34, s5, 16
; SI-NEXT: s_lshr_b32 s35, s7, 16
@@ -9270,38 +9087,6 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_v30i32_to_v60f16_scalar:
; VI: ; %bb.0:
@@ -9335,7 +9120,7 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s45, v0
; VI-NEXT: v_writelane_b32 v30, s39, 7
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s46, s6, 16
; VI-NEXT: s_lshr_b32 s47, s7, 16
@@ -9563,38 +9348,6 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: ; implicit-def: $sgpr39
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr37
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v30i32_to_v60f16_scalar:
; GFX9: ; %bb.0:
@@ -9624,7 +9377,7 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s45, v0
; GFX9-NEXT: v_writelane_b32 v30, s35, 3
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s46, s6, 16
; GFX9-NEXT: s_lshr_b32 s47, s7, 16
@@ -9788,38 +9541,6 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: ; implicit-def: $sgpr35
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr31
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v30i32_to_v60f16_scalar:
; GFX11: ; %bb.0:
@@ -9839,7 +9560,7 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s15, v0
; GFX11-NEXT: s_cmp_lg_u32 s40, 0
; GFX11-NEXT: s_mov_b32 s94, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s40, s4, 16
; GFX11-NEXT: s_lshr_b32 s41, s5, 16
@@ -9982,38 +9703,6 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v26, s7 :: v_dual_mov_b32 v27, s6
; GFX11-NEXT: v_dual_mov_b32 v28, s5 :: v_dual_mov_b32 v29, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: ; implicit-def: $sgpr93
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr91
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr89
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr79
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr77
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr75
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11612,7 +11301,7 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s80, s70, 16
; SI-NEXT: v_readfirstlane_b32 s4, v16
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s99, 16
@@ -11704,7 +11393,7 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s65, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB19_4
+; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s99
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -11946,11 +11635,8 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v28, v29, v28
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v30
; SI-NEXT: v_or_b32_e32 v29, v31, v29
-; SI-NEXT: s_branch .LBB19_5
+; SI-NEXT: s_branch .LBB19_4
; SI-NEXT: .LBB19_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB19_2
-; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -11983,7 +11669,7 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB19_5: ; %end
+; SI-NEXT: .LBB19_4: ; %end
; SI-NEXT: v_readlane_b32 s99, v32, 35
; SI-NEXT: v_readlane_b32 s98, v32, 34
; SI-NEXT: v_readlane_b32 s97, v32, 33
@@ -12528,9 +12214,9 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s63, s63, s76
; GFX9-NEXT: s_pack_ll_b32_b16 s64, s74, s75
; GFX9-NEXT: s_pack_ll_b32_b16 s65, s72, s73
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v29, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v29 op_sel_hi:[1,0]
@@ -12563,10 +12249,8 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v27, s63, v29 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v28, s64, v29 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v29, s65, v29 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB19_5
+; GFX9-NEXT: s_branch .LBB19_4
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -12599,7 +12283,7 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB19_5: ; %end
+; GFX9-NEXT: .LBB19_4: ; %end
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
; GFX9-NEXT: v_readlane_b32 s64, v32, 12
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
@@ -12698,10 +12382,10 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s27, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s28, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s29, s45, s58
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -12735,8 +12419,6 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v29, 0x200, s29 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -13000,9 +12682,9 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB21_3
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_4
+; SI-NEXT: s_cbranch_execnz .LBB21_3
; SI-NEXT: .LBB21_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v29, s65, 1.0
; SI-NEXT: v_add_f32_e64 v28, s64, 1.0
@@ -13034,10 +12716,8 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB21_5
+; SI-NEXT: s_branch .LBB21_4
; SI-NEXT: .LBB21_3:
-; SI-NEXT: s_branch .LBB21_2
-; SI-NEXT: .LBB21_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -13070,7 +12750,7 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB21_5: ; %end
+; SI-NEXT: .LBB21_4: ; %end
; SI-NEXT: v_readlane_b32 s65, v32, 13
; SI-NEXT: v_readlane_b32 s64, v32, 12
; SI-NEXT: v_readlane_b32 s55, v32, 11
@@ -13143,9 +12823,9 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB21_3
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_4
+; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v29, s65, 1.0
; VI-NEXT: v_add_f32_e64 v28, s64, 1.0
@@ -13177,10 +12857,8 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB21_5
+; VI-NEXT: s_branch .LBB21_4
; VI-NEXT: .LBB21_3:
-; VI-NEXT: s_branch .LBB21_2
-; VI-NEXT: .LBB21_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -13213,7 +12891,7 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB21_5: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_readlane_b32 s65, v32, 13
; VI-NEXT: v_readlane_b32 s64, v32, 12
; VI-NEXT: v_readlane_b32 s55, v32, 11
@@ -13286,9 +12964,9 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
+; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v29, s65, 1.0
; GFX9-NEXT: v_add_f32_e64 v28, s64, 1.0
@@ -13320,10 +12998,8 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB21_5
+; GFX9-NEXT: s_branch .LBB21_4
; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
-; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -13356,7 +13032,7 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB21_5: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
; GFX9-NEXT: v_readlane_b32 s64, v32, 12
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
@@ -13430,10 +13106,10 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s64, v10
; GFX11-NEXT: v_writelane_b32 v32, s65, 13
; GFX11-NEXT: v_readfirstlane_b32 s65, v11
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
; GFX11-NEXT: .LBB21_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v29, s65, 1.0
; GFX11-NEXT: v_add_f32_e64 v28, s64, 1.0
@@ -13465,10 +13141,8 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB21_5
+; GFX11-NEXT: s_branch .LBB21_4
; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
-; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -13485,7 +13159,7 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB21_5: ; %end
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
; GFX11-NEXT: v_readlane_b32 s64, v32, 12
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
@@ -13742,7 +13416,7 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s44, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s45, v0
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB23_3
; SI-NEXT: .LBB23_2: ; %cmp.true
@@ -13808,8 +13482,6 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v28, s7
; SI-NEXT: v_mov_b32_e32 v29, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v15i64_to_v30f32_scalar:
; VI: ; %bb.0:
@@ -13832,7 +13504,7 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s44, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s45, v0
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB23_3
; VI-NEXT: .LBB23_2: ; %cmp.true
@@ -13898,8 +13570,6 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v28, s7
; VI-NEXT: v_mov_b32_e32 v29, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v15i64_to_v30f32_scalar:
; GFX9: ; %bb.0:
@@ -13922,7 +13592,7 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s44, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s45, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB23_3
; GFX9-NEXT: .LBB23_2: ; %cmp.true
@@ -13988,8 +13658,6 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v28, s7
; GFX9-NEXT: v_mov_b32_e32 v29, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v15i64_to_v30f32_scalar:
; GFX11: ; %bb.0:
@@ -14009,7 +13677,7 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s15, v0
; GFX11-NEXT: s_cmp_lg_u32 s40, 0
; GFX11-NEXT: s_mov_b32 s40, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
@@ -14062,8 +13730,6 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s7 :: v_dual_mov_b32 v27, s6
; GFX11-NEXT: v_dual_mov_b32 v28, s5 :: v_dual_mov_b32 v29, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: s_branch .LBB23_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14310,9 +13976,9 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB25_3
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_4
+; SI-NEXT: s_cbranch_execnz .LBB25_3
; SI-NEXT: .LBB25_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v29, s65, 1.0
; SI-NEXT: v_add_f32_e64 v28, s64, 1.0
@@ -14344,10 +14010,8 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; SI-NEXT: v_add_f32_e64 v2, s38, 1.0
; SI-NEXT: v_add_f32_e64 v1, s37, 1.0
; SI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; SI-NEXT: s_branch .LBB25_5
+; SI-NEXT: s_branch .LBB25_4
; SI-NEXT: .LBB25_3:
-; SI-NEXT: s_branch .LBB25_2
-; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -14380,7 +14044,7 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB25_5: ; %end
+; SI-NEXT: .LBB25_4: ; %end
; SI-NEXT: v_readlane_b32 s65, v32, 13
; SI-NEXT: v_readlane_b32 s64, v32, 12
; SI-NEXT: v_readlane_b32 s55, v32, 11
@@ -14453,9 +14117,9 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
+; VI-NEXT: s_cbranch_execnz .LBB25_3
; VI-NEXT: .LBB25_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v29, s65, 1.0
; VI-NEXT: v_add_f32_e64 v28, s64, 1.0
@@ -14487,10 +14151,8 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; VI-NEXT: v_add_f32_e64 v2, s38, 1.0
; VI-NEXT: v_add_f32_e64 v1, s37, 1.0
; VI-NEXT: v_add_f32_e64 v0, s36, 1.0
-; VI-NEXT: s_branch .LBB25_5
+; VI-NEXT: s_branch .LBB25_4
; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
-; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -14523,7 +14185,7 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB25_5: ; %end
+; VI-NEXT: .LBB25_4: ; %end
; VI-NEXT: v_readlane_b32 s65, v32, 13
; VI-NEXT: v_readlane_b32 s64, v32, 12
; VI-NEXT: v_readlane_b32 s55, v32, 11
@@ -14596,9 +14258,9 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
+; GFX9-NEXT: s_cbranch_execnz .LBB25_3
; GFX9-NEXT: .LBB25_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v29, s65, 1.0
; GFX9-NEXT: v_add_f32_e64 v28, s64, 1.0
@@ -14630,10 +14292,8 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; GFX9-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX9-NEXT: s_branch .LBB25_5
+; GFX9-NEXT: s_branch .LBB25_4
; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
-; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -14666,7 +14326,7 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB25_5: ; %end
+; GFX9-NEXT: .LBB25_4: ; %end
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
; GFX9-NEXT: v_readlane_b32 s64, v32, 12
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
@@ -14740,10 +14400,10 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; GFX11-NEXT: v_readfirstlane_b32 s64, v10
; GFX11-NEXT: v_writelane_b32 v32, s65, 13
; GFX11-NEXT: v_readfirstlane_b32 s65, v11
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_3
; GFX11-NEXT: .LBB25_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v29, s65, 1.0
; GFX11-NEXT: v_add_f32_e64 v28, s64, 1.0
@@ -14775,10 +14435,8 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; GFX11-NEXT: v_add_f32_e64 v2, s38, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
-; GFX11-NEXT: s_branch .LBB25_5
+; GFX11-NEXT: s_branch .LBB25_4
; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
-; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -14795,7 +14453,7 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB25_5: ; %end
+; GFX11-NEXT: .LBB25_4: ; %end
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
; GFX11-NEXT: v_readlane_b32 s64, v32, 12
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
@@ -15016,9 +14674,9 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB27_3
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_4
+; SI-NEXT: s_cbranch_execnz .LBB27_3
; SI-NEXT: .LBB27_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
; SI-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
@@ -15035,10 +14693,8 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; SI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; SI-NEXT: s_branch .LBB27_5
+; SI-NEXT: s_branch .LBB27_4
; SI-NEXT: .LBB27_3:
-; SI-NEXT: s_branch .LBB27_2
-; SI-NEXT: .LBB27_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -15071,7 +14727,7 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB27_5: ; %end
+; SI-NEXT: .LBB27_4: ; %end
; SI-NEXT: v_readlane_b32 s65, v32, 13
; SI-NEXT: v_readlane_b32 s64, v32, 12
; SI-NEXT: v_readlane_b32 s55, v32, 11
@@ -15144,9 +14800,9 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB27_3
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_4
+; VI-NEXT: s_cbranch_execnz .LBB27_3
; VI-NEXT: .LBB27_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
; VI-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
@@ -15163,10 +14819,8 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; VI-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; VI-NEXT: s_branch .LBB27_5
+; VI-NEXT: s_branch .LBB27_4
; VI-NEXT: .LBB27_3:
-; VI-NEXT: s_branch .LBB27_2
-; VI-NEXT: .LBB27_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -15199,7 +14853,7 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB27_5: ; %end
+; VI-NEXT: .LBB27_4: ; %end
; VI-NEXT: v_readlane_b32 s65, v32, 13
; VI-NEXT: v_readlane_b32 s64, v32, 12
; VI-NEXT: v_readlane_b32 s55, v32, 11
@@ -15272,9 +14926,9 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_4
+; GFX9-NEXT: s_cbranch_execnz .LBB27_3
; GFX9-NEXT: .LBB27_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
; GFX9-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
@@ -15291,10 +14945,8 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; GFX9-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX9-NEXT: s_branch .LBB27_5
+; GFX9-NEXT: s_branch .LBB27_4
; GFX9-NEXT: .LBB27_3:
-; GFX9-NEXT: s_branch .LBB27_2
-; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -15327,7 +14979,7 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB27_5: ; %end
+; GFX9-NEXT: .LBB27_4: ; %end
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
; GFX9-NEXT: v_readlane_b32 s64, v32, 12
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
@@ -15401,10 +15053,10 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; GFX11-NEXT: v_readfirstlane_b32 s64, v10
; GFX11-NEXT: v_writelane_b32 v32, s65, 13
; GFX11-NEXT: v_readfirstlane_b32 s65, v11
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_3
; GFX11-NEXT: .LBB27_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
; GFX11-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
@@ -15421,10 +15073,8 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; GFX11-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
-; GFX11-NEXT: s_branch .LBB27_5
+; GFX11-NEXT: s_branch .LBB27_4
; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
-; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -15441,7 +15091,7 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB27_5: ; %end
+; GFX11-NEXT: .LBB27_4: ; %end
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
; GFX11-NEXT: v_readlane_b32 s64, v32, 12
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
@@ -16413,7 +16063,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: v_writelane_b32 v62, s64, 16
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s64, s5, 16
; SI-NEXT: s_lshr_b32 s55, s7, 16
@@ -16445,7 +16095,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[90:91], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[94:95], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB29_4
+; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v29, s5, 1.0
; SI-NEXT: v_add_f32_e64 v28, s4, 1.0
@@ -16514,40 +16164,8 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v1
-; SI-NEXT: s_branch .LBB29_5
+; SI-NEXT: s_branch .LBB29_4
; SI-NEXT: .LBB29_3:
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: s_branch .LBB29_2
-; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -16609,7 +16227,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v51, s90
; SI-NEXT: v_mov_b32_e32 v52, s92
; SI-NEXT: v_mov_b32_e32 v53, s94
-; SI-NEXT: .LBB29_5: ; %end
+; SI-NEXT: .LBB29_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v53
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v0, v39
@@ -16781,7 +16399,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: v_writelane_b32 v60, s39, 7
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s46, s6, 16
; VI-NEXT: s_lshr_b32 s47, s7, 16
@@ -16813,7 +16431,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; VI-NEXT: s_lshr_b32 s37, s18, 16
; VI-NEXT: s_lshr_b32 s38, s17, 16
; VI-NEXT: s_lshr_b32 s39, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB29_4
+; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v29, s6, 1.0
; VI-NEXT: v_add_f32_e64 v28, s7, 1.0
@@ -16875,40 +16493,8 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v57, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v58, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v59, 16, v0
-; VI-NEXT: s_branch .LBB29_5
+; VI-NEXT: s_branch .LBB29_4
; VI-NEXT: .LBB29_3:
-; VI-NEXT: ; implicit-def: $sgpr39
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr37
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB29_2
-; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -16969,7 +16555,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v32, s56
; VI-NEXT: v_mov_b32_e32 v31, s47
; VI-NEXT: v_mov_b32_e32 v30, s46
-; VI-NEXT: .LBB29_5: ; %end
+; VI-NEXT: .LBB29_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v59, 16, v59
; VI-NEXT: v_lshlrev_b32_e32 v58, 16, v58
; VI-NEXT: v_lshlrev_b32_e32 v57, 16, v57
@@ -17096,7 +16682,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_writelane_b32 v60, s35, 3
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s46, s6, 16
; GFX9-NEXT: s_lshr_b32 s47, s7, 16
@@ -17128,7 +16714,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: s_lshr_b32 s31, s18, 16
; GFX9-NEXT: s_lshr_b32 s34, s17, 16
; GFX9-NEXT: s_lshr_b32 s35, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
+; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v29, s6, 1.0
; GFX9-NEXT: v_add_f32_e64 v28, s7, 1.0
@@ -17190,40 +16776,8 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v57, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v58, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v59, 16, v0
-; GFX9-NEXT: s_branch .LBB29_5
+; GFX9-NEXT: s_branch .LBB29_4
; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: ; implicit-def: $sgpr35
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr31
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB29_2
-; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -17284,7 +16838,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v32, s56
; GFX9-NEXT: v_mov_b32_e32 v31, s47
; GFX9-NEXT: v_mov_b32_e32 v30, s46
-; GFX9-NEXT: .LBB29_5: ; %end
+; GFX9-NEXT: .LBB29_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -17385,7 +16939,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s15, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s40, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s4, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s5, 16
@@ -17418,7 +16972,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s93, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s94, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f32_e64 v29, s4, 1.0
; GFX11-TRUE16-NEXT: v_add_f32_e64 v28, s5, 1.0
@@ -17480,40 +17034,8 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB29_5
+; GFX11-TRUE16-NEXT: s_branch .LBB29_4
; GFX11-TRUE16-NEXT: .LBB29_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr94
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr93
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr92
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr91
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr89
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr79
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: s_branch .LBB29_2
-; GFX11-TRUE16-NEXT: .LBB29_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -17544,7 +17066,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, s46 :: v_dual_mov_b32 v34, s45
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
-; GFX11-TRUE16-NEXT: .LBB29_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB29_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v83, v83 :: v_dual_mov_b32 v82, v82
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v81, v81 :: v_dual_mov_b32 v80, v80
@@ -17611,7 +17133,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s15, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s40, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s4, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s5, 16
@@ -17644,7 +17166,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s93, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s94, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, s4, 1.0
; GFX11-FAKE16-NEXT: v_add_f32_e64 v26, s5, 1.0
@@ -17706,40 +17228,8 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v3
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v4
-; GFX11-FAKE16-NEXT: s_branch .LBB29_5
+; GFX11-FAKE16-NEXT: s_branch .LBB29_4
; GFX11-FAKE16-NEXT: .LBB29_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr94
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr93
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr92
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr91
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr89
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr79
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: s_branch .LBB29_2
-; GFX11-FAKE16-NEXT: .LBB29_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v3, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v1, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s16 :: v_dual_mov_b32 v9, s17
@@ -17770,7 +17260,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v35, s46 :: v_dual_mov_b32 v34, s45
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
-; GFX11-FAKE16-NEXT: .LBB29_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB29_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v84, 0xffff, v1
@@ -19286,7 +18776,7 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; SI-NEXT: s_lshr_b32 s83, s84, 16
; SI-NEXT: v_readfirstlane_b32 s4, v16
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s99, 16
@@ -19602,9 +19092,6 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v60i16_to_v30f32_scalar:
; VI: ; %bb.0:
@@ -20125,9 +19612,9 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s63, s63, s76
; GFX9-NEXT: s_pack_ll_b32_b16 s64, s74, s75
; GFX9-NEXT: s_pack_ll_b32_b16 s65, s72, s73
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
+; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -20159,10 +19646,8 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v27, s63, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v28, s64, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v29, s65, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB31_5
+; GFX9-NEXT: s_branch .LBB31_4
; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
-; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -20195,7 +19680,7 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB31_5: ; %end
+; GFX9-NEXT: .LBB31_4: ; %end
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
; GFX9-NEXT: v_readlane_b32 s64, v32, 12
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
@@ -20294,10 +19779,10 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s27, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s28, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s29, s45, s58
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-NEXT: .LBB31_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -20331,8 +19816,6 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v29, s29, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
-; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -21302,7 +20785,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: v_writelane_b32 v62, s64, 16
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s64, s5, 16
; SI-NEXT: s_lshr_b32 s55, s7, 16
@@ -21334,7 +20817,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; SI-NEXT: s_lshr_b64 s[90:91], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[94:95], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB33_4
+; SI-NEXT: s_cbranch_execnz .LBB33_3
; SI-NEXT: .LBB33_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v29, s5, 1.0
; SI-NEXT: v_add_f32_e64 v28, s4, 1.0
@@ -21403,40 +20886,8 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v1
-; SI-NEXT: s_branch .LBB33_5
+; SI-NEXT: s_branch .LBB33_4
; SI-NEXT: .LBB33_3:
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: s_branch .LBB33_2
-; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -21498,7 +20949,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v51, s90
; SI-NEXT: v_mov_b32_e32 v52, s92
; SI-NEXT: v_mov_b32_e32 v53, s94
-; SI-NEXT: .LBB33_5: ; %end
+; SI-NEXT: .LBB33_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v53
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v0, v39
@@ -21670,7 +21121,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: v_writelane_b32 v60, s39, 7
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s46, s6, 16
; VI-NEXT: s_lshr_b32 s47, s7, 16
@@ -21702,7 +21153,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; VI-NEXT: s_lshr_b32 s37, s18, 16
; VI-NEXT: s_lshr_b32 s38, s17, 16
; VI-NEXT: s_lshr_b32 s39, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB33_4
+; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v29, s6, 1.0
; VI-NEXT: v_add_f32_e64 v28, s7, 1.0
@@ -21764,40 +21215,8 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v57, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v58, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v59, 16, v0
-; VI-NEXT: s_branch .LBB33_5
+; VI-NEXT: s_branch .LBB33_4
; VI-NEXT: .LBB33_3:
-; VI-NEXT: ; implicit-def: $sgpr39
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr37
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB33_2
-; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -21858,7 +21277,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v32, s56
; VI-NEXT: v_mov_b32_e32 v31, s47
; VI-NEXT: v_mov_b32_e32 v30, s46
-; VI-NEXT: .LBB33_5: ; %end
+; VI-NEXT: .LBB33_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v59, 16, v59
; VI-NEXT: v_lshlrev_b32_e32 v58, 16, v58
; VI-NEXT: v_lshlrev_b32_e32 v57, 16, v57
@@ -21985,7 +21404,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_writelane_b32 v60, s35, 3
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s46, s6, 16
; GFX9-NEXT: s_lshr_b32 s47, s7, 16
@@ -22017,7 +21436,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: s_lshr_b32 s31, s18, 16
; GFX9-NEXT: s_lshr_b32 s34, s17, 16
; GFX9-NEXT: s_lshr_b32 s35, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
+; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v29, s6, 1.0
; GFX9-NEXT: v_add_f32_e64 v28, s7, 1.0
@@ -22079,40 +21498,8 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v57, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v58, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v59, 16, v0
-; GFX9-NEXT: s_branch .LBB33_5
+; GFX9-NEXT: s_branch .LBB33_4
; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: ; implicit-def: $sgpr35
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr31
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB33_2
-; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -22173,7 +21560,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v32, s56
; GFX9-NEXT: v_mov_b32_e32 v31, s47
; GFX9-NEXT: v_mov_b32_e32 v30, s46
-; GFX9-NEXT: .LBB33_5: ; %end
+; GFX9-NEXT: .LBB33_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -22274,7 +21661,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s15, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s40, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s4, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s5, 16
@@ -22307,7 +21694,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s93, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s94, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f32_e64 v29, s4, 1.0
; GFX11-TRUE16-NEXT: v_add_f32_e64 v28, s5, 1.0
@@ -22369,40 +21756,8 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB33_5
+; GFX11-TRUE16-NEXT: s_branch .LBB33_4
; GFX11-TRUE16-NEXT: .LBB33_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr94
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr93
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr92
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr91
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr89
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr79
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: s_branch .LBB33_2
-; GFX11-TRUE16-NEXT: .LBB33_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -22433,7 +21788,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, s46 :: v_dual_mov_b32 v34, s45
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
-; GFX11-TRUE16-NEXT: .LBB33_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB33_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v83, v83 :: v_dual_mov_b32 v82, v82
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v81, v81 :: v_dual_mov_b32 v80, v80
@@ -22500,7 +21855,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s15, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s40, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s4, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s5, 16
@@ -22533,7 +21888,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s93, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s94, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, s4, 1.0
; GFX11-FAKE16-NEXT: v_add_f32_e64 v26, s5, 1.0
@@ -22595,40 +21950,8 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v3
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v4
-; GFX11-FAKE16-NEXT: s_branch .LBB33_5
+; GFX11-FAKE16-NEXT: s_branch .LBB33_4
; GFX11-FAKE16-NEXT: .LBB33_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr94
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr93
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr92
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr91
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr89
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr79
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: s_branch .LBB33_2
-; GFX11-FAKE16-NEXT: .LBB33_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v3, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v1, s3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s16 :: v_dual_mov_b32 v9, s17
@@ -22659,7 +21982,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v35, s46 :: v_dual_mov_b32 v34, s45
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
-; GFX11-FAKE16-NEXT: .LBB33_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB33_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v84, 0xffff, v1
@@ -24319,7 +23642,7 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: s_lshr_b32 s80, s70, 16
; SI-NEXT: v_readfirstlane_b32 s4, v16
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB35_3
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s99, 16
@@ -24411,7 +23734,7 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s65, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB35_4
+; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s99
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -24653,11 +23976,8 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v28, v29, v28
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v30
; SI-NEXT: v_or_b32_e32 v29, v31, v29
-; SI-NEXT: s_branch .LBB35_5
+; SI-NEXT: s_branch .LBB35_4
; SI-NEXT: .LBB35_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB35_2
-; SI-NEXT: .LBB35_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -24690,7 +24010,7 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB35_5: ; %end
+; SI-NEXT: .LBB35_4: ; %end
; SI-NEXT: v_readlane_b32 s99, v32, 35
; SI-NEXT: v_readlane_b32 s98, v32, 34
; SI-NEXT: v_readlane_b32 s97, v32, 33
@@ -25235,9 +24555,9 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s63, s63, s76
; GFX9-NEXT: s_pack_ll_b32_b16 s64, s74, s75
; GFX9-NEXT: s_pack_ll_b32_b16 s65, s72, s73
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
+; GFX9-NEXT: s_cbranch_execnz .LBB35_3
; GFX9-NEXT: .LBB35_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v29, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v29 op_sel_hi:[1,0]
@@ -25270,10 +24590,8 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v27, s63, v29 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v28, s64, v29 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v29, s65, v29 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB35_5
+; GFX9-NEXT: s_branch .LBB35_4
; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
-; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -25306,7 +24624,7 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB35_5: ; %end
+; GFX9-NEXT: .LBB35_4: ; %end
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
; GFX9-NEXT: v_readlane_b32 s64, v32, 12
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
@@ -25405,10 +24723,10 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s27, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s28, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s29, s45, s58
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-NEXT: .LBB35_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -25442,8 +24760,6 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v29, 0x200, s29 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
-; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -25698,7 +25014,7 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s44, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s45, v0
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB37_3
; SI-NEXT: .LBB37_2: ; %cmp.true
@@ -25764,8 +25080,6 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v28, s7
; SI-NEXT: v_mov_b32_e32 v29, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v15i64_to_v15f64_scalar:
; VI: ; %bb.0:
@@ -25788,7 +25102,7 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s44, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s45, v0
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB37_3
; VI-NEXT: .LBB37_2: ; %cmp.true
@@ -25854,8 +25168,6 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v28, s7
; VI-NEXT: v_mov_b32_e32 v29, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v15i64_to_v15f64_scalar:
; GFX9: ; %bb.0:
@@ -25878,7 +25190,7 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s44, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s45, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB37_3
; GFX9-NEXT: .LBB37_2: ; %cmp.true
@@ -25944,8 +25256,6 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v28, s7
; GFX9-NEXT: v_mov_b32_e32 v29, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v15i64_to_v15f64_scalar:
; GFX11: ; %bb.0:
@@ -25965,7 +25275,7 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s15, v0
; GFX11-NEXT: s_cmp_lg_u32 s40, 0
; GFX11-NEXT: s_mov_b32 s40, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
@@ -26017,8 +25327,6 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s7 :: v_dual_mov_b32 v27, s6
; GFX11-NEXT: v_dual_mov_b32 v28, s5 :: v_dual_mov_b32 v29, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_4:
-; GFX11-NEXT: s_branch .LBB37_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -26220,9 +25528,9 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s51, v1
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: v_readfirstlane_b32 s50, v0
-; SI-NEXT: s_cbranch_scc0 .LBB39_3
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_4
+; SI-NEXT: s_cbranch_execnz .LBB39_3
; SI-NEXT: .LBB39_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -26239,10 +25547,8 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; SI-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
; SI-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
; SI-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
-; SI-NEXT: s_branch .LBB39_5
+; SI-NEXT: s_branch .LBB39_4
; SI-NEXT: .LBB39_3:
-; SI-NEXT: s_branch .LBB39_2
-; SI-NEXT: .LBB39_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -26275,7 +25581,7 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB39_5: ; %end
+; SI-NEXT: .LBB39_4: ; %end
; SI-NEXT: v_readlane_b32 s65, v32, 13
; SI-NEXT: v_readlane_b32 s64, v32, 12
; SI-NEXT: v_readlane_b32 s55, v32, 11
@@ -26348,9 +25654,9 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s51, v1
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s50, v0
-; VI-NEXT: s_cbranch_scc0 .LBB39_3
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_4
+; VI-NEXT: s_cbranch_execnz .LBB39_3
; VI-NEXT: .LBB39_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -26367,10 +25673,8 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; VI-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
; VI-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
; VI-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
-; VI-NEXT: s_branch .LBB39_5
+; VI-NEXT: s_branch .LBB39_4
; VI-NEXT: .LBB39_3:
-; VI-NEXT: s_branch .LBB39_2
-; VI-NEXT: .LBB39_4:
; VI-NEXT: v_mov_b32_e32 v0, s36
; VI-NEXT: v_mov_b32_e32 v1, s37
; VI-NEXT: v_mov_b32_e32 v2, s38
@@ -26403,7 +25707,7 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v29, s65
; VI-NEXT: v_mov_b32_e32 v30, s66
; VI-NEXT: v_mov_b32_e32 v31, s67
-; VI-NEXT: .LBB39_5: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: v_readlane_b32 s65, v32, 13
; VI-NEXT: v_readlane_b32 s64, v32, 12
; VI-NEXT: v_readlane_b32 s55, v32, 11
@@ -26476,9 +25780,9 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s51, v1
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s50, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
+; GFX9-NEXT: s_cbranch_execnz .LBB39_3
; GFX9-NEXT: .LBB39_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -26495,10 +25799,8 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
; GFX9-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
; GFX9-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
-; GFX9-NEXT: s_branch .LBB39_5
+; GFX9-NEXT: s_branch .LBB39_4
; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
-; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -26531,7 +25833,7 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB39_5: ; %end
+; GFX9-NEXT: .LBB39_4: ; %end
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
; GFX9-NEXT: v_readlane_b32 s64, v32, 12
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
@@ -26605,10 +25907,10 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s64, v10
; GFX11-NEXT: v_writelane_b32 v32, s65, 13
; GFX11-NEXT: v_readfirstlane_b32 s65, v11
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_3
; GFX11-NEXT: .LBB39_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
@@ -26625,10 +25927,8 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[24:25], s[60:61], 1.0
; GFX11-NEXT: v_add_f64 v[26:27], s[62:63], 1.0
; GFX11-NEXT: v_add_f64 v[28:29], s[64:65], 1.0
-; GFX11-NEXT: s_branch .LBB39_5
+; GFX11-NEXT: s_branch .LBB39_4
; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
-; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
; GFX11-NEXT: v_dual_mov_b32 v4, s40 :: v_dual_mov_b32 v5, s41
@@ -26645,7 +25945,7 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v26, s62 :: v_dual_mov_b32 v27, s63
; GFX11-NEXT: v_dual_mov_b32 v28, s64 :: v_dual_mov_b32 v29, s65
; GFX11-NEXT: v_dual_mov_b32 v30, s66 :: v_dual_mov_b32 v31, s67
-; GFX11-NEXT: .LBB39_5: ; %end
+; GFX11-NEXT: .LBB39_4: ; %end
; GFX11-NEXT: v_readlane_b32 s65, v32, 13
; GFX11-NEXT: v_readlane_b32 s64, v32, 12
; GFX11-NEXT: v_readlane_b32 s55, v32, 11
@@ -27649,7 +26949,7 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; SI-NEXT: s_cmp_lg_u32 s42, 0
; SI-NEXT: v_readfirstlane_b32 s42, v0
; SI-NEXT: v_writelane_b32 v30, s64, 16
-; SI-NEXT: s_cbranch_scc0 .LBB41_4
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s34, s5, 16
; SI-NEXT: s_lshr_b32 s35, s7, 16
@@ -27886,38 +27186,6 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB41_4:
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: s_branch .LBB41_2
;
; VI-LABEL: bitcast_v15i64_to_v60i16_scalar:
; VI: ; %bb.0:
@@ -27951,7 +27219,7 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s45, v0
; VI-NEXT: v_writelane_b32 v30, s39, 7
-; VI-NEXT: s_cbranch_scc0 .LBB41_4
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s46, s6, 16
; VI-NEXT: s_lshr_b32 s47, s7, 16
@@ -28179,38 +27447,6 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_4:
-; VI-NEXT: ; implicit-def: $sgpr39
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr37
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB41_2
;
; GFX9-LABEL: bitcast_v15i64_to_v60i16_scalar:
; GFX9: ; %bb.0:
@@ -28240,7 +27476,7 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s45, v0
; GFX9-NEXT: v_writelane_b32 v30, s35, 3
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s46, s6, 16
; GFX9-NEXT: s_lshr_b32 s47, s7, 16
@@ -28404,38 +27640,6 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_4:
-; GFX9-NEXT: ; implicit-def: $sgpr35
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr31
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB41_2
;
; GFX11-LABEL: bitcast_v15i64_to_v60i16_scalar:
; GFX11: ; %bb.0:
@@ -28455,7 +27659,7 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s15, v0
; GFX11-NEXT: s_cmp_lg_u32 s40, 0
; GFX11-NEXT: s_mov_b32 s94, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s40, s4, 16
; GFX11-NEXT: s_lshr_b32 s41, s5, 16
@@ -28598,38 +27802,6 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v26, s7 :: v_dual_mov_b32 v27, s6
; GFX11-NEXT: v_dual_mov_b32 v28, s5 :: v_dual_mov_b32 v29, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_4:
-; GFX11-NEXT: ; implicit-def: $sgpr93
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr91
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr89
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr79
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr77
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr75
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: s_branch .LBB41_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -30084,7 +29256,7 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s83, s84, 16
; SI-NEXT: v_readfirstlane_b32 s4, v16
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB43_4
+; SI-NEXT: s_cbranch_scc0 .LBB43_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s99, 16
@@ -30400,9 +29572,6 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB43_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB43_2
;
; VI-LABEL: bitcast_v60i16_to_v15i64_scalar:
; VI: ; %bb.0:
@@ -30923,9 +30092,9 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s63, s63, s76
; GFX9-NEXT: s_pack_ll_b32_b16 s64, s74, s75
; GFX9-NEXT: s_pack_ll_b32_b16 s65, s72, s73
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
+; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -30957,10 +30126,8 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v27, s63, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v28, s64, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v29, s65, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB43_5
+; GFX9-NEXT: s_branch .LBB43_4
; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
-; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -30993,7 +30160,7 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB43_5: ; %end
+; GFX9-NEXT: .LBB43_4: ; %end
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
; GFX9-NEXT: v_readlane_b32 s64, v32, 12
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
@@ -31092,10 +30259,10 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s27, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s28, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s29, s45, s58
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-NEXT: .LBB43_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -31129,8 +30296,6 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v29, s29, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
-; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -32132,7 +31297,7 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; SI-NEXT: s_cmp_lg_u32 s42, 0
; SI-NEXT: v_readfirstlane_b32 s42, v0
; SI-NEXT: v_writelane_b32 v30, s64, 16
-; SI-NEXT: s_cbranch_scc0 .LBB45_4
+; SI-NEXT: s_cbranch_scc0 .LBB45_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s34, s5, 16
; SI-NEXT: s_lshr_b32 s35, s7, 16
@@ -32369,38 +31534,6 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB45_4:
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: s_branch .LBB45_2
;
; VI-LABEL: bitcast_v15i64_to_v60f16_scalar:
; VI: ; %bb.0:
@@ -32434,7 +31567,7 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: v_readfirstlane_b32 s45, v0
; VI-NEXT: v_writelane_b32 v30, s39, 7
-; VI-NEXT: s_cbranch_scc0 .LBB45_4
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s46, s6, 16
; VI-NEXT: s_lshr_b32 s47, s7, 16
@@ -32662,38 +31795,6 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_4:
-; VI-NEXT: ; implicit-def: $sgpr39
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr37
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB45_2
;
; GFX9-LABEL: bitcast_v15i64_to_v60f16_scalar:
; GFX9: ; %bb.0:
@@ -32723,7 +31824,7 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; GFX9-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-NEXT: v_readfirstlane_b32 s45, v0
; GFX9-NEXT: v_writelane_b32 v30, s35, 3
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s46, s6, 16
; GFX9-NEXT: s_lshr_b32 s47, s7, 16
@@ -32887,38 +31988,6 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_4:
-; GFX9-NEXT: ; implicit-def: $sgpr35
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr31
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB45_2
;
; GFX11-LABEL: bitcast_v15i64_to_v60f16_scalar:
; GFX11: ; %bb.0:
@@ -32938,7 +32007,7 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s15, v0
; GFX11-NEXT: s_cmp_lg_u32 s40, 0
; GFX11-NEXT: s_mov_b32 s94, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s40, s4, 16
; GFX11-NEXT: s_lshr_b32 s41, s5, 16
@@ -33081,38 +32150,6 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v26, s7 :: v_dual_mov_b32 v27, s6
; GFX11-NEXT: v_dual_mov_b32 v28, s5 :: v_dual_mov_b32 v29, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_4:
-; GFX11-NEXT: ; implicit-def: $sgpr93
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr91
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr89
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr79
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr77
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr75
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: s_branch .LBB45_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -34711,7 +33748,7 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s80, s70, 16
; SI-NEXT: v_readfirstlane_b32 s4, v16
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB47_3
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s99, 16
@@ -34803,7 +33840,7 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s65, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB47_4
+; SI-NEXT: s_cbranch_execnz .LBB47_3
; SI-NEXT: .LBB47_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s99
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -35045,11 +34082,8 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v28, v29, v28
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v30
; SI-NEXT: v_or_b32_e32 v29, v31, v29
-; SI-NEXT: s_branch .LBB47_5
+; SI-NEXT: s_branch .LBB47_4
; SI-NEXT: .LBB47_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB47_2
-; SI-NEXT: .LBB47_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -35082,7 +34116,7 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB47_5: ; %end
+; SI-NEXT: .LBB47_4: ; %end
; SI-NEXT: v_readlane_b32 s99, v32, 35
; SI-NEXT: v_readlane_b32 s98, v32, 34
; SI-NEXT: v_readlane_b32 s97, v32, 33
@@ -35627,9 +34661,9 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s63, s63, s76
; GFX9-NEXT: s_pack_ll_b32_b16 s64, s74, s75
; GFX9-NEXT: s_pack_ll_b32_b16 s65, s72, s73
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
+; GFX9-NEXT: s_cbranch_execnz .LBB47_3
; GFX9-NEXT: .LBB47_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v29, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v29 op_sel_hi:[1,0]
@@ -35662,10 +34696,8 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v27, s63, v29 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v28, s64, v29 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v29, s65, v29 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB47_5
+; GFX9-NEXT: s_branch .LBB47_4
; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
-; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -35698,7 +34730,7 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB47_5: ; %end
+; GFX9-NEXT: .LBB47_4: ; %end
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
; GFX9-NEXT: v_readlane_b32 s64, v32, 12
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
@@ -35797,10 +34829,10 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s27, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s28, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s29, s45, s58
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-NEXT: .LBB47_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -35834,8 +34866,6 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v29, 0x200, s29 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -36760,7 +35790,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: v_writelane_b32 v62, s64, 16
-; SI-NEXT: s_cbranch_scc0 .LBB49_3
+; SI-NEXT: s_cbranch_scc0 .LBB49_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s64, s15, 16
; SI-NEXT: s_lshr_b32 s55, s41, 16
@@ -36792,7 +35822,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; SI-NEXT: s_lshr_b64 s[92:93], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB49_4
+; SI-NEXT: s_cbranch_execnz .LBB49_3
; SI-NEXT: .LBB49_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[28:29], s[14:15], 1.0
; SI-NEXT: v_add_f64 v[26:27], s[40:41], 1.0
@@ -36846,40 +35876,8 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v1
-; SI-NEXT: s_branch .LBB49_5
+; SI-NEXT: s_branch .LBB49_4
; SI-NEXT: .LBB49_3:
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: s_branch .LBB49_2
-; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -36947,7 +35945,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v32, s56
; SI-NEXT: v_mov_b32_e32 v31, s44
; SI-NEXT: v_mov_b32_e32 v30, s46
-; SI-NEXT: .LBB49_5: ; %end
+; SI-NEXT: .LBB49_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v53
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v0, v39
@@ -37119,7 +36117,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: v_writelane_b32 v60, s39, 7
-; VI-NEXT: s_cbranch_scc0 .LBB49_3
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s46, s9, 16
; VI-NEXT: s_lshr_b32 s77, s8, 16
@@ -37151,7 +36149,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; VI-NEXT: s_lshr_b32 s38, s18, 16
; VI-NEXT: s_lshr_b32 s76, s17, 16
; VI-NEXT: s_lshr_b32 s39, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB49_4
+; VI-NEXT: s_cbranch_execnz .LBB49_3
; VI-NEXT: .LBB49_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[28:29], s[8:9], 1.0
; VI-NEXT: v_add_f64 v[26:27], s[10:11], 1.0
@@ -37198,40 +36196,8 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v46, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v59, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v56, 16, v0
-; VI-NEXT: s_branch .LBB49_5
+; VI-NEXT: s_branch .LBB49_4
; VI-NEXT: .LBB49_3:
-; VI-NEXT: ; implicit-def: $sgpr39
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr37
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB49_2
-; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -37292,7 +36258,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v37, s56
; VI-NEXT: v_mov_b32_e32 v35, s47
; VI-NEXT: v_mov_b32_e32 v33, s46
-; VI-NEXT: .LBB49_5: ; %end
+; VI-NEXT: .LBB49_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v56, 16, v56
; VI-NEXT: v_lshlrev_b32_e32 v46, 16, v46
; VI-NEXT: v_lshlrev_b32_e32 v44, 16, v44
@@ -37419,7 +36385,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_writelane_b32 v60, s35, 3
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s46, s9, 16
; GFX9-NEXT: s_lshr_b32 s77, s8, 16
@@ -37451,7 +36417,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX9-NEXT: s_lshr_b32 s34, s18, 16
; GFX9-NEXT: s_lshr_b32 s76, s17, 16
; GFX9-NEXT: s_lshr_b32 s35, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
+; GFX9-NEXT: s_cbranch_execnz .LBB49_3
; GFX9-NEXT: .LBB49_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[28:29], s[8:9], 1.0
; GFX9-NEXT: v_add_f64 v[26:27], s[10:11], 1.0
@@ -37498,40 +36464,8 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v59, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v56, 16, v0
-; GFX9-NEXT: s_branch .LBB49_5
+; GFX9-NEXT: s_branch .LBB49_4
; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: ; implicit-def: $sgpr35
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr31
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB49_2
-; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -37592,7 +36526,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v37, s56
; GFX9-NEXT: v_mov_b32_e32 v35, s47
; GFX9-NEXT: v_mov_b32_e32 v33, s46
-; GFX9-NEXT: .LBB49_5: ; %end
+; GFX9-NEXT: .LBB49_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -37693,7 +36627,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s12, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s40, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s15, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s72, s14, 16
@@ -37726,7 +36660,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s63, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s94, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f64 v[28:29], s[14:15], 1.0
; GFX11-TRUE16-NEXT: v_add_f64 v[26:27], s[10:11], 1.0
@@ -37773,40 +36707,8 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB49_5
+; GFX11-TRUE16-NEXT: s_branch .LBB49_4
; GFX11-TRUE16-NEXT: .LBB49_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr94
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr93
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr92
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr91
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr89
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr79
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: s_branch .LBB49_2
-; GFX11-TRUE16-NEXT: .LBB49_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -37837,7 +36739,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, s74 :: v_dual_mov_b32 v36, s43
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s73 :: v_dual_mov_b32 v34, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v83, s63 :: v_dual_mov_b32 v32, s41
-; GFX11-TRUE16-NEXT: .LBB49_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB49_4: ; %end
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v64, v64
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v38, v38
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v51, v51
@@ -37908,7 +36810,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s12, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s40, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s15, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s72, s14, 16
@@ -37941,7 +36843,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s63, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s94, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f64 v[25:26], s[14:15], 1.0
; GFX11-FAKE16-NEXT: v_add_f64 v[27:28], s[10:11], 1.0
@@ -37988,40 +36890,8 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v3
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
-; GFX11-FAKE16-NEXT: s_branch .LBB49_5
+; GFX11-FAKE16-NEXT: s_branch .LBB49_4
; GFX11-FAKE16-NEXT: .LBB49_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr94
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr93
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr92
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr91
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr89
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr79
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: s_branch .LBB49_2
-; GFX11-FAKE16-NEXT: .LBB49_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v33, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v7, s18
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s20 :: v_dual_mov_b32 v12, s22
@@ -38052,7 +36922,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v53, s46 :: v_dual_mov_b32 v50, s44
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v51, s45 :: v_dual_mov_b32 v48, s43
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v38, s42 :: v_dual_mov_b32 v37, s41
-; GFX11-FAKE16-NEXT: .LBB49_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB49_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -39568,7 +38438,7 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; SI-NEXT: s_lshr_b32 s83, s84, 16
; SI-NEXT: v_readfirstlane_b32 s4, v16
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB51_4
+; SI-NEXT: s_cbranch_scc0 .LBB51_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s99, 16
@@ -39884,9 +38754,6 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB51_4:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB51_2
;
; VI-LABEL: bitcast_v60i16_to_v15f64_scalar:
; VI: ; %bb.0:
@@ -40407,9 +39274,9 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s63, s63, s76
; GFX9-NEXT: s_pack_ll_b32_b16 s64, s74, s75
; GFX9-NEXT: s_pack_ll_b32_b16 s65, s72, s73
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_4
+; GFX9-NEXT: s_cbranch_execnz .LBB51_3
; GFX9-NEXT: .LBB51_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s36, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s37, 3 op_sel_hi:[1,0]
@@ -40441,10 +39308,8 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v27, s63, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v28, s64, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v29, s65, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB51_5
+; GFX9-NEXT: s_branch .LBB51_4
; GFX9-NEXT: .LBB51_3:
-; GFX9-NEXT: s_branch .LBB51_2
-; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -40477,7 +39342,7 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB51_5: ; %end
+; GFX9-NEXT: .LBB51_4: ; %end
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
; GFX9-NEXT: v_readlane_b32 s64, v32, 12
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
@@ -40576,10 +39441,10 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s27, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s28, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s29, s45, s58
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-NEXT: .LBB51_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
@@ -40613,8 +39478,6 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v29, s29, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
-; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -41539,7 +40402,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: v_writelane_b32 v62, s64, 16
-; SI-NEXT: s_cbranch_scc0 .LBB53_3
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s64, s15, 16
; SI-NEXT: s_lshr_b32 s55, s41, 16
@@ -41571,7 +40434,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; SI-NEXT: s_lshr_b64 s[92:93], s[20:21], 16
; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB53_4
+; SI-NEXT: s_cbranch_execnz .LBB53_3
; SI-NEXT: .LBB53_2: ; %cmp.true
; SI-NEXT: v_add_f64 v[28:29], s[14:15], 1.0
; SI-NEXT: v_add_f64 v[26:27], s[40:41], 1.0
@@ -41625,40 +40488,8 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v1
-; SI-NEXT: s_branch .LBB53_5
+; SI-NEXT: s_branch .LBB53_4
; SI-NEXT: .LBB53_3:
-; SI-NEXT: ; implicit-def: $sgpr30
-; SI-NEXT: ; implicit-def: $sgpr34
-; SI-NEXT: ; implicit-def: $sgpr94
-; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr92
-; SI-NEXT: ; implicit-def: $sgpr36
-; SI-NEXT: ; implicit-def: $sgpr90
-; SI-NEXT: ; implicit-def: $sgpr37
-; SI-NEXT: ; implicit-def: $sgpr88
-; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr78
-; SI-NEXT: ; implicit-def: $sgpr39
-; SI-NEXT: ; implicit-def: $sgpr76
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr49
-; SI-NEXT: ; implicit-def: $sgpr72
-; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr52
-; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: ; implicit-def: $sgpr55
-; SI-NEXT: ; implicit-def: $sgpr64
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: s_branch .LBB53_2
-; SI-NEXT: .LBB53_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -41726,7 +40557,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; SI-NEXT: v_mov_b32_e32 v32, s56
; SI-NEXT: v_mov_b32_e32 v31, s44
; SI-NEXT: v_mov_b32_e32 v30, s46
-; SI-NEXT: .LBB53_5: ; %end
+; SI-NEXT: .LBB53_4: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v53
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v0, v39
@@ -41898,7 +40729,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: v_writelane_b32 v60, s39, 7
-; VI-NEXT: s_cbranch_scc0 .LBB53_3
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s46, s9, 16
; VI-NEXT: s_lshr_b32 s77, s8, 16
@@ -41930,7 +40761,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; VI-NEXT: s_lshr_b32 s38, s18, 16
; VI-NEXT: s_lshr_b32 s76, s17, 16
; VI-NEXT: s_lshr_b32 s39, s16, 16
-; VI-NEXT: s_cbranch_execnz .LBB53_4
+; VI-NEXT: s_cbranch_execnz .LBB53_3
; VI-NEXT: .LBB53_2: ; %cmp.true
; VI-NEXT: v_add_f64 v[28:29], s[8:9], 1.0
; VI-NEXT: v_add_f64 v[26:27], s[10:11], 1.0
@@ -41977,40 +40808,8 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; VI-NEXT: v_lshrrev_b32_e32 v46, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v59, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v56, 16, v0
-; VI-NEXT: s_branch .LBB53_5
+; VI-NEXT: s_branch .LBB53_4
; VI-NEXT: .LBB53_3:
-; VI-NEXT: ; implicit-def: $sgpr39
-; VI-NEXT: ; implicit-def: $sgpr76
-; VI-NEXT: ; implicit-def: $sgpr38
-; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: ; implicit-def: $sgpr37
-; VI-NEXT: ; implicit-def: $sgpr74
-; VI-NEXT: ; implicit-def: $sgpr36
-; VI-NEXT: ; implicit-def: $sgpr73
-; VI-NEXT: ; implicit-def: $sgpr35
-; VI-NEXT: ; implicit-def: $sgpr72
-; VI-NEXT: ; implicit-def: $sgpr34
-; VI-NEXT: ; implicit-def: $sgpr63
-; VI-NEXT: ; implicit-def: $sgpr31
-; VI-NEXT: ; implicit-def: $sgpr62
-; VI-NEXT: ; implicit-def: $sgpr30
-; VI-NEXT: ; implicit-def: $sgpr61
-; VI-NEXT: ; implicit-def: $sgpr91
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr90
-; VI-NEXT: ; implicit-def: $sgpr59
-; VI-NEXT: ; implicit-def: $sgpr89
-; VI-NEXT: ; implicit-def: $sgpr58
-; VI-NEXT: ; implicit-def: $sgpr88
-; VI-NEXT: ; implicit-def: $sgpr57
-; VI-NEXT: ; implicit-def: $sgpr79
-; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: ; implicit-def: $sgpr78
-; VI-NEXT: ; implicit-def: $sgpr47
-; VI-NEXT: ; implicit-def: $sgpr77
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB53_2
-; VI-NEXT: .LBB53_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -42071,7 +40870,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; VI-NEXT: v_mov_b32_e32 v37, s56
; VI-NEXT: v_mov_b32_e32 v35, s47
; VI-NEXT: v_mov_b32_e32 v33, s46
-; VI-NEXT: .LBB53_5: ; %end
+; VI-NEXT: .LBB53_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v56, 16, v56
; VI-NEXT: v_lshlrev_b32_e32 v46, 16, v46
; VI-NEXT: v_lshlrev_b32_e32 v44, 16, v44
@@ -42198,7 +40997,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_writelane_b32 v60, s35, 3
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s46, s9, 16
; GFX9-NEXT: s_lshr_b32 s77, s8, 16
@@ -42230,7 +41029,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX9-NEXT: s_lshr_b32 s34, s18, 16
; GFX9-NEXT: s_lshr_b32 s76, s17, 16
; GFX9-NEXT: s_lshr_b32 s35, s16, 16
-; GFX9-NEXT: s_cbranch_execnz .LBB53_4
+; GFX9-NEXT: s_cbranch_execnz .LBB53_3
; GFX9-NEXT: .LBB53_2: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[28:29], s[8:9], 1.0
; GFX9-NEXT: v_add_f64 v[26:27], s[10:11], 1.0
@@ -42277,40 +41076,8 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v59, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v56, 16, v0
-; GFX9-NEXT: s_branch .LBB53_5
+; GFX9-NEXT: s_branch .LBB53_4
; GFX9-NEXT: .LBB53_3:
-; GFX9-NEXT: ; implicit-def: $sgpr35
-; GFX9-NEXT: ; implicit-def: $sgpr76
-; GFX9-NEXT: ; implicit-def: $sgpr34
-; GFX9-NEXT: ; implicit-def: $sgpr75
-; GFX9-NEXT: ; implicit-def: $sgpr31
-; GFX9-NEXT: ; implicit-def: $sgpr74
-; GFX9-NEXT: ; implicit-def: $sgpr30
-; GFX9-NEXT: ; implicit-def: $sgpr73
-; GFX9-NEXT: ; implicit-def: $sgpr95
-; GFX9-NEXT: ; implicit-def: $sgpr72
-; GFX9-NEXT: ; implicit-def: $sgpr94
-; GFX9-NEXT: ; implicit-def: $sgpr63
-; GFX9-NEXT: ; implicit-def: $sgpr93
-; GFX9-NEXT: ; implicit-def: $sgpr62
-; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: ; implicit-def: $sgpr61
-; GFX9-NEXT: ; implicit-def: $sgpr91
-; GFX9-NEXT: ; implicit-def: $sgpr60
-; GFX9-NEXT: ; implicit-def: $sgpr90
-; GFX9-NEXT: ; implicit-def: $sgpr59
-; GFX9-NEXT: ; implicit-def: $sgpr89
-; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr88
-; GFX9-NEXT: ; implicit-def: $sgpr57
-; GFX9-NEXT: ; implicit-def: $sgpr79
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr77
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB53_2
-; GFX9-NEXT: .LBB53_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -42371,7 +41138,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v37, s56
; GFX9-NEXT: v_mov_b32_e32 v35, s47
; GFX9-NEXT: v_mov_b32_e32 v33, s46
-; GFX9-NEXT: .LBB53_5: ; %end
+; GFX9-NEXT: .LBB53_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -42472,7 +41239,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s12, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s40, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s15, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s72, s14, 16
@@ -42505,7 +41272,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX11-TRUE16-NEXT: s_lshr_b32 s63, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s94, s0, 16
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f64 v[28:29], s[14:15], 1.0
; GFX11-TRUE16-NEXT: v_add_f64 v[26:27], s[10:11], 1.0
@@ -42552,40 +41319,8 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v1
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v0
-; GFX11-TRUE16-NEXT: s_branch .LBB53_5
+; GFX11-TRUE16-NEXT: s_branch .LBB53_4
; GFX11-TRUE16-NEXT: .LBB53_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr94
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr93
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr92
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr91
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr89
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr79
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-TRUE16-NEXT: s_branch .LBB53_2
-; GFX11-TRUE16-NEXT: .LBB53_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -42616,7 +41351,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, s74 :: v_dual_mov_b32 v36, s43
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s73 :: v_dual_mov_b32 v34, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v83, s63 :: v_dual_mov_b32 v32, s41
-; GFX11-TRUE16-NEXT: .LBB53_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB53_4: ; %end
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v64, v64
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v38, v38
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v51, v51
@@ -42687,7 +41422,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s12, v0
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s40, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s15, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s72, s14, 16
@@ -42720,7 +41455,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX11-FAKE16-NEXT: s_lshr_b32 s63, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s94, s0, 16
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f64 v[25:26], s[14:15], 1.0
; GFX11-FAKE16-NEXT: v_add_f64 v[27:28], s[10:11], 1.0
@@ -42767,40 +41502,8 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v3
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
-; GFX11-FAKE16-NEXT: s_branch .LBB53_5
+; GFX11-FAKE16-NEXT: s_branch .LBB53_4
; GFX11-FAKE16-NEXT: .LBB53_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr94
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr63
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr93
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr62
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr92
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr61
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr91
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr60
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr90
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr59
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr89
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr58
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr88
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr57
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr79
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr56
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr78
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr47
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr77
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr76
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr75
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr73
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr72
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
-; GFX11-FAKE16-NEXT: s_branch .LBB53_2
-; GFX11-FAKE16-NEXT: .LBB53_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v33, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v7, s18
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s20 :: v_dual_mov_b32 v12, s22
@@ -42831,7 +41534,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v53, s46 :: v_dual_mov_b32 v50, s44
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v51, s45 :: v_dual_mov_b32 v48, s43
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v38, s42 :: v_dual_mov_b32 v37, s41
-; GFX11-FAKE16-NEXT: .LBB53_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB53_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -44491,7 +43194,7 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: s_lshr_b32 s80, s70, 16
; SI-NEXT: v_readfirstlane_b32 s4, v16
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s99, 16
@@ -44583,7 +43286,7 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: s_and_b32 s4, s6, 0xffff
; SI-NEXT: s_lshl_b32 s5, s7, 16
; SI-NEXT: s_or_b32 s65, s4, s5
-; SI-NEXT: s_cbranch_execnz .LBB55_4
+; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s99
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -44825,11 +43528,8 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: v_or_b32_e32 v28, v29, v28
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v30
; SI-NEXT: v_or_b32_e32 v29, v31, v29
-; SI-NEXT: s_branch .LBB55_5
+; SI-NEXT: s_branch .LBB55_4
; SI-NEXT: .LBB55_3:
-; SI-NEXT: ; implicit-def: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67
-; SI-NEXT: s_branch .LBB55_2
-; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v0, s36
; SI-NEXT: v_mov_b32_e32 v1, s37
; SI-NEXT: v_mov_b32_e32 v2, s38
@@ -44862,7 +43562,7 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: v_mov_b32_e32 v29, s65
; SI-NEXT: v_mov_b32_e32 v30, s66
; SI-NEXT: v_mov_b32_e32 v31, s67
-; SI-NEXT: .LBB55_5: ; %end
+; SI-NEXT: .LBB55_4: ; %end
; SI-NEXT: v_readlane_b32 s99, v32, 35
; SI-NEXT: v_readlane_b32 s98, v32, 34
; SI-NEXT: v_readlane_b32 s97, v32, 33
@@ -45407,9 +44107,9 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX9-NEXT: s_pack_ll_b32_b16 s63, s63, s76
; GFX9-NEXT: s_pack_ll_b32_b16 s64, s74, s75
; GFX9-NEXT: s_pack_ll_b32_b16 s65, s72, s73
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
+; GFX9-NEXT: s_cbranch_execnz .LBB55_3
; GFX9-NEXT: .LBB55_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v29, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s36, v29 op_sel_hi:[1,0]
@@ -45442,10 +44142,8 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX9-NEXT: v_pk_add_f16 v27, s63, v29 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v28, s64, v29 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v29, s65, v29 op_sel_hi:[1,0]
-; GFX9-NEXT: s_branch .LBB55_5
+; GFX9-NEXT: s_branch .LBB55_4
; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
-; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s36
; GFX9-NEXT: v_mov_b32_e32 v1, s37
; GFX9-NEXT: v_mov_b32_e32 v2, s38
@@ -45478,7 +44176,7 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v29, s65
; GFX9-NEXT: v_mov_b32_e32 v30, s66
; GFX9-NEXT: v_mov_b32_e32 v31, s67
-; GFX9-NEXT: .LBB55_5: ; %end
+; GFX9-NEXT: .LBB55_4: ; %end
; GFX9-NEXT: v_readlane_b32 s65, v32, 13
; GFX9-NEXT: v_readlane_b32 s64, v32, 12
; GFX9-NEXT: v_readlane_b32 s55, v32, 11
@@ -45577,10 +44275,10 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX11-NEXT: s_pack_ll_b32_b16 s27, s56, s61
; GFX11-NEXT: s_pack_ll_b32_b16 s28, s46, s59
; GFX11-NEXT: s_pack_ll_b32_b16 s29, s45, s58
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-NEXT: .LBB55_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
@@ -45614,8 +44312,6 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX11-NEXT: v_pk_add_f16 v29, 0x200, s29 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
-; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
@@ -47943,7 +46639,7 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; VI-NEXT: s_lshr_b32 s36, s35, 16
; VI-NEXT: v_readfirstlane_b32 s4, v16
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB57_3
; VI-NEXT: .LBB57_2: ; %cmp.true
@@ -48141,8 +46837,6 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v60i16_to_v60f16_scalar:
; GFX9: ; %bb.0:
@@ -48214,9 +46908,9 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
+; GFX9-NEXT: s_cbranch_execnz .LBB57_3
; GFX9-NEXT: .LBB57_2: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s35, s75
; GFX9-NEXT: v_pk_add_u16 v29, s4, 3 op_sel_hi:[1,0]
@@ -48308,10 +47002,8 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v27
; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v28
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX9-NEXT: s_branch .LBB57_5
+; GFX9-NEXT: s_branch .LBB57_4
; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
-; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v29, s35
; GFX9-NEXT: v_mov_b32_e32 v28, s34
; GFX9-NEXT: v_mov_b32_e32 v27, s31
@@ -48372,7 +47064,7 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v57, s8
; GFX9-NEXT: v_mov_b32_e32 v58, s7
; GFX9-NEXT: v_mov_b32_e32 v59, s6
-; GFX9-NEXT: .LBB57_5: ; %end
+; GFX9-NEXT: .LBB57_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -48503,10 +47195,10 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s74, 16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s94, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s94, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s94
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s59, s89, s59
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s62, s92, s62
@@ -48598,10 +47290,8 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX11-TRUE16-NEXT: s_branch .LBB57_5
+; GFX11-TRUE16-NEXT: s_branch .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB57_2
-; GFX11-TRUE16-NEXT: .LBB57_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, s93 :: v_dual_mov_b32 v28, s92
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, s89 :: v_dual_mov_b32 v26, s91
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, s90 :: v_dual_mov_b32 v24, s88
@@ -48632,7 +47322,7 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v70, s9 :: v_dual_mov_b32 v71, s8
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v80, s6 :: v_dual_mov_b32 v81, s4
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v82, s5 :: v_dual_mov_b32 v83, s7
-; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB57_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v83, v83 :: v_dual_mov_b32 v82, v82
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v81, v81 :: v_dual_mov_b32 v80, v80
@@ -48729,10 +47419,10 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s74, 16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s94, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s94, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s94
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-FAKE16-NEXT: .LBB57_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s59, s89, s59
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s62, s92, s62
@@ -48824,10 +47514,8 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v26
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
-; GFX11-FAKE16-NEXT: s_branch .LBB57_5
+; GFX11-FAKE16-NEXT: s_branch .LBB57_4
; GFX11-FAKE16-NEXT: .LBB57_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB57_2
-; GFX11-FAKE16-NEXT: .LBB57_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s93 :: v_dual_mov_b32 v26, s92
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s89 :: v_dual_mov_b32 v28, s91
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v29, s90 :: v_dual_mov_b32 v20, s88
@@ -48858,7 +47546,7 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v70, s9 :: v_dual_mov_b32 v71, s8
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v80, s6 :: v_dual_mov_b32 v81, s4
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v82, s5 :: v_dual_mov_b32 v83, s7
-; GFX11-FAKE16-NEXT: .LBB57_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB57_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v84, 0xffff, v1
@@ -50039,9 +48727,9 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill
-; SI-NEXT: s_cbranch_scc0 .LBB59_3
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_4
+; SI-NEXT: s_cbranch_execnz .LBB59_3
; SI-NEXT: .LBB59_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s35
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -50342,10 +49030,8 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v23, v14
; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; SI-NEXT: s_branch .LBB59_5
+; SI-NEXT: s_branch .LBB59_4
; SI-NEXT: .LBB59_3:
-; SI-NEXT: s_branch .LBB59_2
-; SI-NEXT: .LBB59_4:
; SI-NEXT: v_mov_b32_e32 v11, s13
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: v_mov_b32_e32 v12, s45
@@ -50411,7 +49097,7 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v60, s59
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; SI-NEXT: .LBB59_5: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v58
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -50614,9 +49300,9 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
+; VI-NEXT: s_cbranch_execnz .LBB59_3
; VI-NEXT: .LBB59_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v30, 0x200
; VI-NEXT: v_add_f16_e32 v0, s16, v30
@@ -50679,10 +49365,8 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v31, s47, v30
; VI-NEXT: v_add_f16_e32 v29, s44, v30
; VI-NEXT: v_add_f16_e32 v30, s45, v30
-; VI-NEXT: s_branch .LBB59_5
+; VI-NEXT: s_branch .LBB59_4
; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
-; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v30, s45
; VI-NEXT: v_mov_b32_e32 v29, s44
; VI-NEXT: v_mov_b32_e32 v31, s47
@@ -50743,7 +49427,7 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v59, s43
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: .LBB59_5: ; %end
+; VI-NEXT: .LBB59_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v59, 16, v59
; VI-NEXT: v_lshlrev_b32_e32 v58, 16, v58
; VI-NEXT: v_lshlrev_b32_e32 v57, 16, v57
@@ -50900,9 +49584,9 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
+; GFX9-NEXT: s_cbranch_execnz .LBB59_3
; GFX9-NEXT: .LBB59_2: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s35, s75
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
@@ -50995,10 +49679,8 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v27
; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v28
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX9-NEXT: s_branch .LBB59_5
+; GFX9-NEXT: s_branch .LBB59_4
; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
-; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v29, s35
; GFX9-NEXT: v_mov_b32_e32 v28, s34
; GFX9-NEXT: v_mov_b32_e32 v27, s31
@@ -51059,7 +49741,7 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v57, s8
; GFX9-NEXT: v_mov_b32_e32 v58, s7
; GFX9-NEXT: v_mov_b32_e32 v59, s6
-; GFX9-NEXT: .LBB59_5: ; %end
+; GFX9-NEXT: .LBB59_4: ; %end
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -51190,10 +49872,10 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s74, 16
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s94, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s94, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s94
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s59, s89, s59
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s62, s92, s62
@@ -51285,10 +49967,8 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX11-TRUE16-NEXT: s_branch .LBB59_5
+; GFX11-TRUE16-NEXT: s_branch .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB59_2
-; GFX11-TRUE16-NEXT: .LBB59_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, s93 :: v_dual_mov_b32 v28, s92
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, s89 :: v_dual_mov_b32 v26, s91
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, s90 :: v_dual_mov_b32 v24, s88
@@ -51319,7 +49999,7 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v70, s9 :: v_dual_mov_b32 v71, s8
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v80, s6 :: v_dual_mov_b32 v81, s4
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v82, s5 :: v_dual_mov_b32 v83, s7
-; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
+; GFX11-TRUE16-NEXT: .LBB59_4: ; %end
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v83, v83 :: v_dual_mov_b32 v82, v82
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v81, v81 :: v_dual_mov_b32 v80, v80
@@ -51416,10 +50096,10 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s74, 16
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s94, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s94, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s94
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-FAKE16-NEXT: .LBB59_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s59, s89, s59
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s62, s92, s62
@@ -51511,10 +50191,8 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v26
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
-; GFX11-FAKE16-NEXT: s_branch .LBB59_5
+; GFX11-FAKE16-NEXT: s_branch .LBB59_4
; GFX11-FAKE16-NEXT: .LBB59_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB59_2
-; GFX11-FAKE16-NEXT: .LBB59_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s93 :: v_dual_mov_b32 v26, s92
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s89 :: v_dual_mov_b32 v28, s91
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v29, s90 :: v_dual_mov_b32 v20, s88
@@ -51545,7 +50223,7 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v70, s9 :: v_dual_mov_b32 v71, s8
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v80, s6 :: v_dual_mov_b32 v81, s4
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v82, s5 :: v_dual_mov_b32 v83, s7
-; GFX11-FAKE16-NEXT: .LBB59_5: ; %end
+; GFX11-FAKE16-NEXT: .LBB59_4: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v84, 0xffff, v1
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
index 687bea385a266..4c9f2511e9874 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
@@ -89,7 +89,7 @@ define inreg <3 x float> @bitcast_v3i32_to_v3f32_scalar(<3 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_cbranch_execnz .LBB1_3
; SI-NEXT: .LBB1_2: ; %cmp.true
@@ -101,14 +101,12 @@ define inreg <3 x float> @bitcast_v3i32_to_v3f32_scalar(<3 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v3i32_to_v3f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB1_3
; VI-NEXT: .LBB1_2: ; %cmp.true
@@ -120,14 +118,12 @@ define inreg <3 x float> @bitcast_v3i32_to_v3f32_scalar(<3 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v3i32_to_v3f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB1_3
; GFX9-NEXT: .LBB1_2: ; %cmp.true
@@ -139,15 +135,13 @@ define inreg <3 x float> @bitcast_v3i32_to_v3f32_scalar(<3 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v3i32_to_v3f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
@@ -160,8 +154,6 @@ define inreg <3 x float> @bitcast_v3i32_to_v3f32_scalar(<3 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -261,17 +253,15 @@ define inreg <3 x i32> @bitcast_v3f32_to_v3i32_scalar(<3 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: s_cbranch_execnz .LBB3_3
; SI-NEXT: .LBB3_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v2, s18, 1.0
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
-; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -281,17 +271,15 @@ define inreg <3 x i32> @bitcast_v3f32_to_v3i32_scalar(<3 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: s_cbranch_execnz .LBB3_3
; VI-NEXT: .LBB3_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
-; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -301,17 +289,15 @@ define inreg <3 x i32> @bitcast_v3f32_to_v3i32_scalar(<3 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: s_cbranch_execnz .LBB3_3
; GFX9-NEXT: .LBB3_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
-; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -322,18 +308,16 @@ define inreg <3 x i32> @bitcast_v3f32_to_v3i32_scalar(<3 x float> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_3
; GFX11-NEXT: .LBB3_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
-; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -630,7 +614,7 @@ define inreg <12 x i8> @bitcast_v3i32_to_v12i8_scalar(<3 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s19, s17, 24
; SI-NEXT: s_lshr_b32 s22, s17, 16
@@ -669,23 +653,12 @@ define inreg <12 x i8> @bitcast_v3i32_to_v12i8_scalar(<3 x i32> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v10, s10
; SI-NEXT: v_mov_b32_e32 v11, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr23
-; SI-NEXT: ; implicit-def: $sgpr22
-; SI-NEXT: ; implicit-def: $sgpr19
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v3i32_to_v12i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s19, s16, 8
; VI-NEXT: s_lshr_b32 s10, s18, 16
@@ -724,23 +697,12 @@ define inreg <12 x i8> @bitcast_v3i32_to_v12i8_scalar(<3 x i32> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v10, s10
; VI-NEXT: v_mov_b32_e32 v11, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: ; implicit-def: $sgpr19
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v3i32_to_v12i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s19, s16, 8
; GFX9-NEXT: s_lshr_b32 s10, s18, 16
@@ -779,24 +741,13 @@ define inreg <12 x i8> @bitcast_v3i32_to_v12i8_scalar(<3 x i32> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v10, s10
; GFX9-NEXT: v_mov_b32_e32 v11, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: ; implicit-def: $sgpr19
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v3i32_to_v12i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s14, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s3, s2, 16
; GFX11-NEXT: s_lshr_b32 s8, s2, 8
@@ -830,17 +781,6 @@ define inreg <12 x i8> @bitcast_v3i32_to_v12i8_scalar(<3 x i32> inreg %a, i32 in
; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v9, s8
; GFX11-NEXT: v_dual_mov_b32 v10, s3 :: v_dual_mov_b32 v11, s6
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1289,7 +1229,7 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -1364,15 +1304,12 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v12i8_to_v3i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v2, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -1428,15 +1365,12 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; VI-NEXT: v_add_u32_e32 v2, vcc, 0x3000000, v2
; VI-NEXT: .LBB7_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v12i8_to_v3i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v2, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -1490,16 +1424,13 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB7_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v12i8_to_v3i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -1551,9 +1482,6 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; GFX11-NEXT: v_or_b32_e32 v2, v5, v6
; GFX11-NEXT: .LBB7_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1689,7 +1617,7 @@ define inreg <6 x bfloat> @bitcast_v3i32_to_v6bf16_scalar(<3 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s7, s18, 0xffff0000
; SI-NEXT: s_lshl_b32 s6, s18, 16
@@ -1722,20 +1650,12 @@ define inreg <6 x bfloat> @bitcast_v3i32_to_v6bf16_scalar(<3 x i32> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s6
; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v3i32_to_v6bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB9_3
; VI-NEXT: .LBB9_2: ; %cmp.true
@@ -1747,14 +1667,12 @@ define inreg <6 x bfloat> @bitcast_v3i32_to_v6bf16_scalar(<3 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v3i32_to_v6bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %cmp.true
@@ -1766,15 +1684,13 @@ define inreg <6 x bfloat> @bitcast_v3i32_to_v6bf16_scalar(<3 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v3i32_to_v6bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
@@ -1787,8 +1703,6 @@ define inreg <6 x bfloat> @bitcast_v3i32_to_v6bf16_scalar(<3 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2170,7 +2084,7 @@ define inreg <3 x i32> @bitcast_v6bf16_to_v3i32_scalar(<6 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v6, 1.0, s7
; SI-NEXT: v_mul_f32_e64 v10, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB11_4
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v12
; SI-NEXT: v_lshr_b64 v[0:1], v[8:9], 16
@@ -2200,17 +2114,14 @@ define inreg <3 x i32> @bitcast_v6bf16_to_v3i32_scalar(<6 x bfloat> inreg %a, i3
; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 16
; SI-NEXT: .LBB11_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2
-; SI-NEXT: s_branch .LBB11_2
;
; VI-LABEL: bitcast_v6bf16_to_v3i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
+; VI-NEXT: s_cbranch_execnz .LBB11_3
; VI-NEXT: .LBB11_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s18, 16
; VI-NEXT: v_mov_b32_e32 v5, 0x40c00000
@@ -2270,8 +2181,6 @@ define inreg <3 x i32> @bitcast_v6bf16_to_v3i32_scalar(<6 x bfloat> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v1, v3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
-; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2281,9 +2190,9 @@ define inreg <3 x i32> @bitcast_v6bf16_to_v3i32_scalar(<6 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
+; GFX9-NEXT: s_cbranch_execnz .LBB11_3
; GFX9-NEXT: .LBB11_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s18
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -2346,8 +2255,6 @@ define inreg <3 x i32> @bitcast_v6bf16_to_v3i32_scalar(<6 x bfloat> inreg %a, i3
; GFX9-NEXT: v_lshl_or_b32 v0, v4, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
-; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2358,10 +2265,10 @@ define inreg <3 x i32> @bitcast_v6bf16_to_v3i32_scalar(<6 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s3, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-TRUE16-NEXT: .LBB11_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s2
; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
@@ -2429,8 +2336,6 @@ define inreg <3 x i32> @bitcast_v6bf16_to_v3i32_scalar(<6 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v6.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB11_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB11_2
-; GFX11-TRUE16-NEXT: .LBB11_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s2
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -2440,10 +2345,10 @@ define inreg <3 x i32> @bitcast_v6bf16_to_v3i32_scalar(<6 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s3, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB11_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB11_3
; GFX11-FAKE16-NEXT: .LBB11_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s2
; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
@@ -2513,8 +2418,6 @@ define inreg <3 x i32> @bitcast_v6bf16_to_v3i32_scalar(<6 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v4, 16, v5
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB11_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB11_2
-; GFX11-FAKE16-NEXT: .LBB11_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s2
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
@@ -2638,7 +2541,7 @@ define inreg <6 x half> @bitcast_v3i32_to_v6f16_scalar(<3 x i32> inreg %a, i32 i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB13_4
+; SI-NEXT: s_cbranch_scc0 .LBB13_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s10, s17, 16
; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16
@@ -2665,17 +2568,12 @@ define inreg <6 x half> @bitcast_v3i32_to_v6f16_scalar(<3 x i32> inreg %a, i32 i
; SI-NEXT: v_mov_b32_e32 v1, s6
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB13_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: s_branch .LBB13_2
;
; VI-LABEL: bitcast_v3i32_to_v6f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
@@ -2687,14 +2585,12 @@ define inreg <6 x half> @bitcast_v3i32_to_v6f16_scalar(<3 x i32> inreg %a, i32 i
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v3i32_to_v6f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
@@ -2706,15 +2602,13 @@ define inreg <6 x half> @bitcast_v3i32_to_v6f16_scalar(<3 x i32> inreg %a, i32 i
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v3i32_to_v6f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
@@ -2727,8 +2621,6 @@ define inreg <6 x half> @bitcast_v3i32_to_v6f16_scalar(<3 x i32> inreg %a, i32 i
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2892,7 +2784,7 @@ define inreg <3 x i32> @bitcast_v6f16_to_v3i32_scalar(<6 x half> inreg %a, i32 i
; SI-NEXT: s_lshr_b32 s10, s17, 16
; SI-NEXT: s_lshr_b32 s11, s16, 16
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB15_3
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s11, 16
@@ -2903,7 +2795,7 @@ define inreg <3 x i32> @bitcast_v6f16_to_v3i32_scalar(<6 x half> inreg %a, i32 i
; SI-NEXT: s_and_b32 s6, s18, 0xffff
; SI-NEXT: s_lshl_b32 s8, s7, 16
; SI-NEXT: s_or_b32 s6, s6, s8
-; SI-NEXT: s_cbranch_execnz .LBB15_4
+; SI-NEXT: s_cbranch_execnz .LBB15_3
; SI-NEXT: .LBB15_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s11
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -2931,9 +2823,6 @@ define inreg <3 x i32> @bitcast_v6f16_to_v3i32_scalar(<6 x half> inreg %a, i32 i
; SI-NEXT: v_or_b32_e32 v2, v4, v2
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB15_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6
-; SI-NEXT: s_branch .LBB15_2
-; SI-NEXT: .LBB15_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -2943,9 +2832,9 @@ define inreg <3 x i32> @bitcast_v6f16_to_v3i32_scalar(<6 x half> inreg %a, i32 i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_3
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB15_4
+; VI-NEXT: s_cbranch_execnz .LBB15_3
; VI-NEXT: .LBB15_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s18, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -2965,8 +2854,6 @@ define inreg <3 x i32> @bitcast_v6f16_to_v3i32_scalar(<6 x half> inreg %a, i32 i
; VI-NEXT: v_or_b32_e32 v0, v0, v3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB15_3:
-; VI-NEXT: s_branch .LBB15_2
-; VI-NEXT: .LBB15_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2976,9 +2863,9 @@ define inreg <3 x i32> @bitcast_v6f16_to_v3i32_scalar(<6 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
+; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
@@ -2986,8 +2873,6 @@ define inreg <3 x i32> @bitcast_v6f16_to_v3i32_scalar(<6 x half> inreg %a, i32 i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
-; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2998,18 +2883,16 @@ define inreg <3 x i32> @bitcast_v6f16_to_v3i32_scalar(<6 x half> inreg %a, i32 i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -3133,7 +3016,7 @@ define inreg <6 x i16> @bitcast_v3i32_to_v6i16_scalar(<3 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s10, s17, 16
; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16
@@ -3160,17 +3043,12 @@ define inreg <6 x i16> @bitcast_v3i32_to_v6i16_scalar(<3 x i32> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v1, s6
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_v3i32_to_v6i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
@@ -3182,14 +3060,12 @@ define inreg <6 x i16> @bitcast_v3i32_to_v6i16_scalar(<3 x i32> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v3i32_to_v6i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
@@ -3201,15 +3077,13 @@ define inreg <6 x i16> @bitcast_v3i32_to_v6i16_scalar(<3 x i32> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v3i32_to_v6i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
@@ -3222,8 +3096,6 @@ define inreg <6 x i16> @bitcast_v3i32_to_v6i16_scalar(<3 x i32> inreg %a, i32 in
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3374,7 +3246,7 @@ define inreg <3 x i32> @bitcast_v6i16_to_v3i32_scalar(<6 x i16> inreg %a, i32 in
; SI-NEXT: s_lshr_b32 s10, s17, 16
; SI-NEXT: s_lshr_b32 s11, s16, 16
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB19_4
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s11, 16
@@ -3407,15 +3279,12 @@ define inreg <3 x i32> @bitcast_v6i16_to_v3i32_scalar(<6 x i16> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB19_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6
-; SI-NEXT: s_branch .LBB19_2
;
; VI-LABEL: bitcast_v6i16_to_v3i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_4
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB19_3
; VI-NEXT: .LBB19_2: ; %cmp.true
@@ -3439,24 +3308,20 @@ define inreg <3 x i32> @bitcast_v6i16_to_v3i32_scalar(<6 x i16> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB19_4:
-; VI-NEXT: s_branch .LBB19_2
;
; GFX9-LABEL: bitcast_v6i16_to_v3i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
+; GFX9-NEXT: s_cbranch_execnz .LBB19_3
; GFX9-NEXT: .LBB19_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
-; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3467,18 +3332,16 @@ define inreg <3 x i32> @bitcast_v6i16_to_v3i32_scalar(<6 x i16> inreg %a, i32 in
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
-; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -3773,7 +3636,7 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB21_3
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s19, s17, 24
; SI-NEXT: s_lshr_b32 s22, s17, 16
@@ -3784,7 +3647,7 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16
; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8
-; SI-NEXT: s_cbranch_execnz .LBB21_4
+; SI-NEXT: s_cbranch_execnz .LBB21_3
; SI-NEXT: .LBB21_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v8, s18, 1.0
; SI-NEXT: v_add_f32_e64 v17, s17, 1.0
@@ -3798,19 +3661,8 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v17
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v17
; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v17
-; SI-NEXT: s_branch .LBB21_5
+; SI-NEXT: s_branch .LBB21_4
; SI-NEXT: .LBB21_3:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr23
-; SI-NEXT: ; implicit-def: $sgpr22
-; SI-NEXT: ; implicit-def: $sgpr19
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: s_branch .LBB21_2
-; SI-NEXT: .LBB21_4:
; SI-NEXT: v_mov_b32_e32 v16, s16
; SI-NEXT: v_mov_b32_e32 v17, s17
; SI-NEXT: v_mov_b32_e32 v8, s18
@@ -3823,7 +3675,7 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v3, s4
; SI-NEXT: v_mov_b32_e32 v14, s6
; SI-NEXT: v_mov_b32_e32 v1, s8
-; SI-NEXT: .LBB21_5: ; %end
+; SI-NEXT: .LBB21_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, v16
; SI-NEXT: v_mov_b32_e32 v2, v14
; SI-NEXT: v_mov_b32_e32 v4, v17
@@ -3834,7 +3686,7 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_3
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s19, s16, 8
; VI-NEXT: s_lshr_b32 s10, s18, 16
@@ -3845,7 +3697,7 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; VI-NEXT: s_lshr_b32 s14, s16, 16
; VI-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB21_4
+; VI-NEXT: s_cbranch_execnz .LBB21_3
; VI-NEXT: .LBB21_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v8, s18, 1.0
; VI-NEXT: v_add_f32_e64 v14, s17, 1.0
@@ -3859,19 +3711,8 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; VI-NEXT: v_lshrrev_b32_e32 v5, 8, v14
; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v13
; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v13
-; VI-NEXT: s_branch .LBB21_5
+; VI-NEXT: s_branch .LBB21_4
; VI-NEXT: .LBB21_3:
-; VI-NEXT: ; implicit-def: $sgpr19
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB21_2
-; VI-NEXT: .LBB21_4:
; VI-NEXT: v_mov_b32_e32 v13, s16
; VI-NEXT: v_mov_b32_e32 v14, s17
; VI-NEXT: v_mov_b32_e32 v8, s18
@@ -3884,7 +3725,7 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s10
; VI-NEXT: v_mov_b32_e32 v11, s6
; VI-NEXT: v_mov_b32_e32 v3, s4
-; VI-NEXT: .LBB21_5: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, v13
; VI-NEXT: v_mov_b32_e32 v4, v14
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -3893,7 +3734,7 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s19, s16, 8
; GFX9-NEXT: s_lshr_b32 s10, s18, 16
@@ -3904,7 +3745,7 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; GFX9-NEXT: s_lshr_b32 s14, s16, 16
; GFX9-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
+; GFX9-NEXT: s_cbranch_execnz .LBB21_3
; GFX9-NEXT: .LBB21_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v8, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v14, s17, 1.0
@@ -3918,19 +3759,8 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v14
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v13
-; GFX9-NEXT: s_branch .LBB21_5
+; GFX9-NEXT: s_branch .LBB21_4
; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: ; implicit-def: $sgpr19
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB21_2
-; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v13, s16
; GFX9-NEXT: v_mov_b32_e32 v14, s17
; GFX9-NEXT: v_mov_b32_e32 v8, s18
@@ -3943,7 +3773,7 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s10
; GFX9-NEXT: v_mov_b32_e32 v11, s6
; GFX9-NEXT: v_mov_b32_e32 v3, s4
-; GFX9-NEXT: .LBB21_5: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, v13
; GFX9-NEXT: v_mov_b32_e32 v4, v14
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -3953,7 +3783,7 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s8, s2, 16
; GFX11-NEXT: s_lshr_b32 s9, s2, 8
@@ -3965,7 +3795,7 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
; GFX11-NEXT: .LBB21_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v8, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s1, 1.0
@@ -3980,19 +3810,8 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v13
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v13
-; GFX11-NEXT: s_branch .LBB21_5
+; GFX11-NEXT: s_branch .LBB21_4
; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB21_2
-; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v13, s0 :: v_dual_mov_b32 v14, s1
; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v1, s14
; GFX11-NEXT: v_dual_mov_b32 v2, s12 :: v_dual_mov_b32 v5, s13
@@ -4000,7 +3819,7 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v9, s9 :: v_dual_mov_b32 v10, s8
; GFX11-NEXT: v_mov_b32_e32 v11, s6
; GFX11-NEXT: v_mov_b32_e32 v3, s4
-; GFX11-NEXT: .LBB21_5: ; %end
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: v_mov_b32_e32 v0, v13
; GFX11-NEXT: v_mov_b32_e32 v4, v14
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -4452,7 +4271,7 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -4527,15 +4346,12 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v12i8_to_v3f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v2, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -4591,15 +4407,12 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v2, vcc, 0x3000000, v2
; VI-NEXT: .LBB23_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v12i8_to_v3f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v2, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -4653,16 +4466,13 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB23_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v12i8_to_v3f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -4714,9 +4524,6 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; GFX11-NEXT: v_or_b32_e32 v2, v5, v6
; GFX11-NEXT: .LBB23_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2
-; GFX11-NEXT: s_branch .LBB23_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4851,7 +4658,7 @@ define inreg <6 x bfloat> @bitcast_v3f32_to_v6bf16_scalar(<3 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB25_3
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s18, 0xffff0000
; SI-NEXT: s_lshl_b32 s7, s18, 16
@@ -4859,7 +4666,7 @@ define inreg <6 x bfloat> @bitcast_v3f32_to_v6bf16_scalar(<3 x float> inreg %a,
; SI-NEXT: s_lshl_b32 s9, s17, 16
; SI-NEXT: s_and_b32 s10, s16, 0xffff0000
; SI-NEXT: s_lshl_b32 s11, s16, 16
-; SI-NEXT: s_cbranch_execnz .LBB25_4
+; SI-NEXT: s_cbranch_execnz .LBB25_3
; SI-NEXT: .LBB25_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
@@ -4870,23 +4677,15 @@ define inreg <6 x bfloat> @bitcast_v3f32_to_v6bf16_scalar(<3 x float> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v0
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: s_branch .LBB25_5
+; SI-NEXT: s_branch .LBB25_4
; SI-NEXT: .LBB25_3:
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB25_2
-; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s11
; SI-NEXT: v_mov_b32_e32 v1, s10
; SI-NEXT: v_mov_b32_e32 v2, s9
; SI-NEXT: v_mov_b32_e32 v5, s8
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: v_mov_b32_e32 v4, s6
-; SI-NEXT: .LBB25_5: ; %end
+; SI-NEXT: .LBB25_4: ; %end
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
@@ -4905,17 +4704,15 @@ define inreg <6 x bfloat> @bitcast_v3f32_to_v6bf16_scalar(<3 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
+; VI-NEXT: s_cbranch_execnz .LBB25_3
; VI-NEXT: .LBB25_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
-; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -4926,17 +4723,15 @@ define inreg <6 x bfloat> @bitcast_v3f32_to_v6bf16_scalar(<3 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
+; GFX9-NEXT: s_cbranch_execnz .LBB25_3
; GFX9-NEXT: .LBB25_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
-; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -4948,18 +4743,16 @@ define inreg <6 x bfloat> @bitcast_v3f32_to_v6bf16_scalar(<3 x float> inreg %a,
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_3
; GFX11-NEXT: .LBB25_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
-; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -5344,7 +5137,7 @@ define inreg <3 x float> @bitcast_v6bf16_to_v3f32_scalar(<6 x bfloat> inreg %a,
; SI-NEXT: v_mul_f32_e64 v6, 1.0, s7
; SI-NEXT: v_mul_f32_e64 v10, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB27_4
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v12
; SI-NEXT: v_lshr_b64 v[0:1], v[8:9], 16
@@ -5374,17 +5167,14 @@ define inreg <3 x float> @bitcast_v6bf16_to_v3f32_scalar(<6 x bfloat> inreg %a,
; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 16
; SI-NEXT: .LBB27_3: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2
-; SI-NEXT: s_branch .LBB27_2
;
; VI-LABEL: bitcast_v6bf16_to_v3f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB27_3
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_4
+; VI-NEXT: s_cbranch_execnz .LBB27_3
; VI-NEXT: .LBB27_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s18, 16
; VI-NEXT: v_mov_b32_e32 v5, 0x40c00000
@@ -5444,8 +5234,6 @@ define inreg <3 x float> @bitcast_v6bf16_to_v3f32_scalar(<6 x bfloat> inreg %a,
; VI-NEXT: v_mov_b32_e32 v1, v3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB27_3:
-; VI-NEXT: s_branch .LBB27_2
-; VI-NEXT: .LBB27_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -5455,9 +5243,9 @@ define inreg <3 x float> @bitcast_v6bf16_to_v3f32_scalar(<6 x bfloat> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_4
+; GFX9-NEXT: s_cbranch_execnz .LBB27_3
; GFX9-NEXT: .LBB27_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s18
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -5520,8 +5308,6 @@ define inreg <3 x float> @bitcast_v6bf16_to_v3f32_scalar(<6 x bfloat> inreg %a,
; GFX9-NEXT: v_lshl_or_b32 v0, v4, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB27_3:
-; GFX9-NEXT: s_branch .LBB27_2
-; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -5532,10 +5318,10 @@ define inreg <3 x float> @bitcast_v6bf16_to_v3f32_scalar(<6 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s3, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB27_3
; GFX11-TRUE16-NEXT: .LBB27_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s2
; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
@@ -5603,8 +5389,6 @@ define inreg <3 x float> @bitcast_v6bf16_to_v3f32_scalar(<6 x bfloat> inreg %a,
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v6.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB27_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB27_2
-; GFX11-TRUE16-NEXT: .LBB27_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s2
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -5614,10 +5398,10 @@ define inreg <3 x float> @bitcast_v6bf16_to_v3f32_scalar(<6 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s3, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB27_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB27_3
; GFX11-FAKE16-NEXT: .LBB27_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s2
; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
@@ -5687,8 +5471,6 @@ define inreg <3 x float> @bitcast_v6bf16_to_v3f32_scalar(<6 x bfloat> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v4, 16, v5
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB27_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB27_2
-; GFX11-FAKE16-NEXT: .LBB27_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s2
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
@@ -5811,12 +5593,12 @@ define inreg <6 x half> @bitcast_v3f32_to_v6f16_scalar(<3 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s10, s17, 16
; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB29_4
+; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v2, s18, 1.0
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
@@ -5824,20 +5606,15 @@ define inreg <6 x half> @bitcast_v3f32_to_v6f16_scalar(<3 x float> inreg %a, i32
; SI-NEXT: v_lshr_b64 v[5:6], v[0:1], 16
; SI-NEXT: v_lshr_b64 v[3:4], v[2:3], 16
; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v1
-; SI-NEXT: s_branch .LBB29_5
+; SI-NEXT: s_branch .LBB29_4
; SI-NEXT: .LBB29_3:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: s_branch .LBB29_2
-; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v4, s10
; SI-NEXT: v_mov_b32_e32 v3, s4
; SI-NEXT: v_mov_b32_e32 v5, s6
-; SI-NEXT: .LBB29_5: ; %end
+; SI-NEXT: .LBB29_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
@@ -5853,17 +5630,15 @@ define inreg <6 x half> @bitcast_v3f32_to_v6f16_scalar(<3 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
+; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
-; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -5874,17 +5649,15 @@ define inreg <6 x half> @bitcast_v3f32_to_v6f16_scalar(<3 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
+; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
-; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -5896,18 +5669,16 @@ define inreg <6 x half> @bitcast_v3f32_to_v6f16_scalar(<3 x float> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
; GFX11-NEXT: .LBB29_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
-; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -6074,7 +5845,7 @@ define inreg <3 x float> @bitcast_v6f16_to_v3f32_scalar(<6 x half> inreg %a, i32
; SI-NEXT: s_lshr_b32 s10, s17, 16
; SI-NEXT: s_lshr_b32 s11, s16, 16
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB31_3
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s11, 16
@@ -6085,7 +5856,7 @@ define inreg <3 x float> @bitcast_v6f16_to_v3f32_scalar(<6 x half> inreg %a, i32
; SI-NEXT: s_and_b32 s6, s18, 0xffff
; SI-NEXT: s_lshl_b32 s8, s7, 16
; SI-NEXT: s_or_b32 s6, s6, s8
-; SI-NEXT: s_cbranch_execnz .LBB31_4
+; SI-NEXT: s_cbranch_execnz .LBB31_3
; SI-NEXT: .LBB31_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s11
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -6113,9 +5884,6 @@ define inreg <3 x float> @bitcast_v6f16_to_v3f32_scalar(<6 x half> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v2, v4, v2
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB31_3:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6
-; SI-NEXT: s_branch .LBB31_2
-; SI-NEXT: .LBB31_4:
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -6125,9 +5893,9 @@ define inreg <3 x float> @bitcast_v6f16_to_v3f32_scalar(<6 x half> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_3
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB31_4
+; VI-NEXT: s_cbranch_execnz .LBB31_3
; VI-NEXT: .LBB31_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s18, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
@@ -6147,8 +5915,6 @@ define inreg <3 x float> @bitcast_v6f16_to_v3f32_scalar(<6 x half> inreg %a, i32
; VI-NEXT: v_or_b32_e32 v0, v0, v3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB31_3:
-; VI-NEXT: s_branch .LBB31_2
-; VI-NEXT: .LBB31_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -6158,9 +5924,9 @@ define inreg <3 x float> @bitcast_v6f16_to_v3f32_scalar(<6 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
+; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
@@ -6168,8 +5934,6 @@ define inreg <3 x float> @bitcast_v6f16_to_v3f32_scalar(<6 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
-; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -6180,18 +5944,16 @@ define inreg <3 x float> @bitcast_v6f16_to_v3f32_scalar(<6 x half> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-NEXT: .LBB31_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
-; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -6314,12 +6076,12 @@ define inreg <6 x i16> @bitcast_v3f32_to_v6i16_scalar(<3 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s10, s17, 16
; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16
; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16
-; SI-NEXT: s_cbranch_execnz .LBB33_4
+; SI-NEXT: s_cbranch_execnz .LBB33_3
; SI-NEXT: .LBB33_2: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v2, s18, 1.0
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
@@ -6327,20 +6089,15 @@ define inreg <6 x i16> @bitcast_v3f32_to_v6i16_scalar(<3 x float> inreg %a, i32
; SI-NEXT: v_lshr_b64 v[5:6], v[0:1], 16
; SI-NEXT: v_lshr_b64 v[3:4], v[2:3], 16
; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v1
-; SI-NEXT: s_branch .LBB33_5
+; SI-NEXT: s_branch .LBB33_4
; SI-NEXT: .LBB33_3:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: s_branch .LBB33_2
-; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v4, s10
; SI-NEXT: v_mov_b32_e32 v3, s4
; SI-NEXT: v_mov_b32_e32 v5, s6
-; SI-NEXT: .LBB33_5: ; %end
+; SI-NEXT: .LBB33_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
@@ -6356,17 +6113,15 @@ define inreg <6 x i16> @bitcast_v3f32_to_v6i16_scalar(<3 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_4
+; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB33_3:
-; VI-NEXT: s_branch .LBB33_2
-; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -6377,17 +6132,15 @@ define inreg <6 x i16> @bitcast_v3f32_to_v6i16_scalar(<3 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
+; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: s_branch .LBB33_2
-; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -6399,18 +6152,16 @@ define inreg <6 x i16> @bitcast_v3f32_to_v6i16_scalar(<3 x float> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
-; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -6564,7 +6315,7 @@ define inreg <3 x float> @bitcast_v6i16_to_v3f32_scalar(<6 x i16> inreg %a, i32
; SI-NEXT: s_lshr_b32 s10, s17, 16
; SI-NEXT: s_lshr_b32 s11, s16, 16
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB35_4
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s11, 16
@@ -6597,15 +6348,12 @@ define inreg <3 x float> @bitcast_v6i16_to_v3f32_scalar(<6 x i16> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB35_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6
-; SI-NEXT: s_branch .LBB35_2
;
; VI-LABEL: bitcast_v6i16_to_v3f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB35_4
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB35_3
; VI-NEXT: .LBB35_2: ; %cmp.true
@@ -6629,24 +6377,20 @@ define inreg <3 x float> @bitcast_v6i16_to_v3f32_scalar(<6 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB35_4:
-; VI-NEXT: s_branch .LBB35_2
;
; GFX9-LABEL: bitcast_v6i16_to_v3f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
+; GFX9-NEXT: s_cbranch_execnz .LBB35_3
; GFX9-NEXT: .LBB35_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
-; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -6657,18 +6401,16 @@ define inreg <3 x float> @bitcast_v6i16_to_v3f32_scalar(<6 x i16> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-NEXT: .LBB35_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
-; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -7139,7 +6881,7 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s4, s4, 16
@@ -7226,20 +6968,12 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s9
; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v12i8_to_v6bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v2, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -7295,15 +7029,12 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v2, vcc, 0x3000000, v2
; VI-NEXT: .LBB37_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v12i8_to_v6bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v2, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -7357,16 +7088,13 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB37_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v12i8_to_v6bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -7418,9 +7146,6 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; GFX11-NEXT: v_or_b32_e32 v2, v5, v6
; GFX11-NEXT: .LBB37_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX11-NEXT: s_branch .LBB37_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7975,7 +7700,7 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s7
; SI-NEXT: v_mul_f32_e64 v19, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v9, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB39_4
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v21
; SI-NEXT: v_lshr_b64 v[16:17], v[0:1], 16
@@ -8025,23 +7750,12 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v8, v13
; SI-NEXT: v_mov_b32_e32 v9, v12
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_4:
-; SI-NEXT: ; implicit-def: $vgpr16
-; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: ; implicit-def: $vgpr14
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr8
-; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: ; implicit-def: $vgpr13
-; SI-NEXT: ; implicit-def: $vgpr12
-; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: s_branch .LBB39_2
;
; VI-LABEL: bitcast_v6bf16_to_v12i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB39_3
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s19, s18, 16
; VI-NEXT: s_lshr_b32 s15, s18, 8
@@ -8052,7 +7766,7 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; VI-NEXT: s_lshr_b32 s13, s16, 8
; VI-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB39_4
+; VI-NEXT: s_cbranch_execnz .LBB39_3
; VI-NEXT: .LBB39_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v3, 0x40c00000
@@ -8120,19 +7834,8 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; VI-NEXT: v_lshrrev_b32_e32 v5, 8, v4
; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v0
-; VI-NEXT: s_branch .LBB39_5
+; VI-NEXT: s_branch .LBB39_4
; VI-NEXT: .LBB39_3:
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr19
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB39_2
-; VI-NEXT: .LBB39_4:
; VI-NEXT: v_mov_b32_e32 v8, s18
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v4, s17
@@ -8145,7 +7848,7 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v5, s10
; VI-NEXT: v_mov_b32_e32 v11, s6
; VI-NEXT: v_mov_b32_e32 v14, s4
-; VI-NEXT: .LBB39_5: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: v_mov_b32_e32 v3, v14
; VI-NEXT: v_mov_b32_e32 v9, v13
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -8154,7 +7857,7 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s19, s17, 16
; GFX9-NEXT: s_lshr_b32 s15, s18, 16
@@ -8165,7 +7868,7 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s12, s16, 8
; GFX9-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
+; GFX9-NEXT: s_cbranch_execnz .LBB39_3
; GFX9-NEXT: .LBB39_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s17
; GFX9-NEXT: v_mov_b32_e32 v3, 0x40c00000
@@ -8240,17 +7943,6 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v4, v13
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr19
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB39_2
-; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v8, s18
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v6, s19
@@ -8270,7 +7962,7 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s3, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s2, 8
@@ -8282,7 +7974,7 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-TRUE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB39_3
; GFX11-TRUE16-NEXT: .LBB39_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
@@ -8365,17 +8057,6 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v13
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB39_3:
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
-; GFX11-TRUE16-NEXT: s_branch .LBB39_2
-; GFX11-TRUE16-NEXT: .LBB39_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v9, s12
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s10
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s14 :: v_dual_mov_b32 v7, s8
@@ -8389,7 +8070,7 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s3, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB39_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s2, 8
@@ -8401,7 +8082,7 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-FAKE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB39_3
; GFX11-FAKE16-NEXT: .LBB39_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
@@ -8484,17 +8165,6 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v13
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB39_3:
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
-; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
-; GFX11-FAKE16-NEXT: s_branch .LBB39_2
-; GFX11-FAKE16-NEXT: .LBB39_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v9, s12
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s10
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s14 :: v_dual_mov_b32 v7, s8
@@ -8967,7 +8637,7 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB41_4
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -9058,19 +8728,12 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB41_4:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: s_branch .LBB41_2
;
; VI-LABEL: bitcast_v12i8_to_v6f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB41_4
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v2, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -9126,15 +8789,12 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; VI-NEXT: v_add_u32_e32 v2, vcc, 0x3000000, v2
; VI-NEXT: .LBB41_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; VI-NEXT: s_branch .LBB41_2
;
; GFX9-LABEL: bitcast_v12i8_to_v6f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v2, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -9188,16 +8848,13 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB41_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX9-NEXT: s_branch .LBB41_2
;
; GFX11-LABEL: bitcast_v12i8_to_v6f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -9249,9 +8906,6 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; GFX11-NEXT: v_or_b32_e32 v2, v5, v6
; GFX11-NEXT: .LBB41_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX11-NEXT: s_branch .LBB41_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9594,7 +9248,7 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; SI-NEXT: s_lshr_b32 s15, s17, 16
; SI-NEXT: s_lshr_b32 s20, s16, 16
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB43_3
+; SI-NEXT: s_cbranch_scc0 .LBB43_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s20, 16
@@ -9612,7 +9266,7 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; SI-NEXT: s_lshr_b32 s9, s11, 8
; SI-NEXT: s_bfe_u32 s19, s15, 0x80008
; SI-NEXT: s_bfe_u32 s21, s14, 0x80008
-; SI-NEXT: s_cbranch_execnz .LBB43_4
+; SI-NEXT: s_cbranch_execnz .LBB43_3
; SI-NEXT: .LBB43_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s20
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -9645,19 +9299,8 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; SI-NEXT: v_lshrrev_b32_e32 v9, 8, v8
; SI-NEXT: v_bfe_u32 v7, v6, 8, 8
; SI-NEXT: v_bfe_u32 v11, v10, 8, 8
-; SI-NEXT: s_branch .LBB43_5
+; SI-NEXT: s_branch .LBB43_4
; SI-NEXT: .LBB43_3:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr19
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr21
-; SI-NEXT: s_branch .LBB43_2
-; SI-NEXT: .LBB43_4:
; SI-NEXT: v_mov_b32_e32 v10, s14
; SI-NEXT: v_mov_b32_e32 v6, s15
; SI-NEXT: v_mov_b32_e32 v11, s21
@@ -9670,7 +9313,7 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; SI-NEXT: v_mov_b32_e32 v3, s6
; SI-NEXT: v_mov_b32_e32 v4, s8
; SI-NEXT: v_mov_b32_e32 v1, s10
-; SI-NEXT: .LBB43_5: ; %end
+; SI-NEXT: .LBB43_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, v12
; SI-NEXT: v_mov_b32_e32 v2, v4
; SI-NEXT: v_mov_b32_e32 v4, v13
@@ -9680,7 +9323,7 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB43_3
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s19, s16, 16
; VI-NEXT: s_lshr_b32 s14, s18, 16
@@ -9691,7 +9334,7 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; VI-NEXT: s_lshr_b32 s12, s16, 8
; VI-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; VI-NEXT: s_cbranch_execnz .LBB43_4
+; VI-NEXT: s_cbranch_execnz .LBB43_3
; VI-NEXT: .LBB43_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v1, 0x200
@@ -9719,17 +9362,6 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; VI-NEXT: v_mov_b32_e32 v4, v13
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB43_3:
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr19
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB43_2
-; VI-NEXT: .LBB43_4:
; VI-NEXT: v_mov_b32_e32 v2, s19
; VI-NEXT: v_mov_b32_e32 v6, s15
; VI-NEXT: v_mov_b32_e32 v10, s14
@@ -9748,7 +9380,7 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s19, s16, 8
; GFX9-NEXT: s_lshr_b32 s10, s18, 16
@@ -9759,7 +9391,7 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; GFX9-NEXT: s_lshr_b32 s14, s16, 16
; GFX9-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
+; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, s17, v0 op_sel_hi:[1,0]
@@ -9775,19 +9407,8 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v15
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v14
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v14
-; GFX9-NEXT: s_branch .LBB43_5
+; GFX9-NEXT: s_branch .LBB43_4
; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: ; implicit-def: $sgpr19
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB43_2
-; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v14, s16
; GFX9-NEXT: v_mov_b32_e32 v15, s17
; GFX9-NEXT: v_mov_b32_e32 v8, s18
@@ -9800,7 +9421,7 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; GFX9-NEXT: v_mov_b32_e32 v10, s10
; GFX9-NEXT: v_mov_b32_e32 v11, s6
; GFX9-NEXT: v_mov_b32_e32 v3, s4
-; GFX9-NEXT: .LBB43_5: ; %end
+; GFX9-NEXT: .LBB43_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, v14
; GFX9-NEXT: v_mov_b32_e32 v4, v15
; GFX9-NEXT: v_mov_b32_e32 v9, v13
@@ -9811,7 +9432,7 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s8, s2, 16
; GFX11-NEXT: s_lshr_b32 s9, s2, 8
@@ -9823,7 +9444,7 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-NEXT: .LBB43_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s0 op_sel_hi:[0,1]
@@ -9840,19 +9461,8 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v15
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v14
-; GFX11-NEXT: s_branch .LBB43_5
+; GFX11-NEXT: s_branch .LBB43_4
; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB43_2
-; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s1
; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v1, s14
; GFX11-NEXT: v_dual_mov_b32 v2, s12 :: v_dual_mov_b32 v5, s13
@@ -9860,7 +9470,7 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; GFX11-NEXT: v_dual_mov_b32 v13, s9 :: v_dual_mov_b32 v10, s8
; GFX11-NEXT: v_mov_b32_e32 v11, s6
; GFX11-NEXT: v_mov_b32_e32 v3, s4
-; GFX11-NEXT: .LBB43_5: ; %end
+; GFX11-NEXT: .LBB43_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX11-NEXT: v_dual_mov_b32 v0, v14 :: v_dual_mov_b32 v9, v13
; GFX11-NEXT: v_mov_b32_e32 v4, v15
@@ -10330,7 +9940,7 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB45_4
+; SI-NEXT: s_cbranch_scc0 .LBB45_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
@@ -10421,19 +10031,12 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB45_4:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: s_branch .LBB45_2
;
; VI-LABEL: bitcast_v12i8_to_v6i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB45_4
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v2, 0xc0c0004
; VI-NEXT: v_mov_b32_e32 v1, s19
@@ -10489,15 +10092,12 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; VI-NEXT: v_add_u32_e32 v2, vcc, 0x3000000, v2
; VI-NEXT: .LBB45_3: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; VI-NEXT: s_branch .LBB45_2
;
; GFX9-LABEL: bitcast_v12i8_to_v6i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_mov_b32_e32 v2, 0xc0c0004
; GFX9-NEXT: v_mov_b32_e32 v1, s19
@@ -10551,16 +10151,13 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: .LBB45_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_4:
-; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX9-NEXT: s_branch .LBB45_2
;
; GFX11-LABEL: bitcast_v12i8_to_v6i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_mov_b32_e32 v0, 0xc0c0004
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -10612,9 +10209,6 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; GFX11-NEXT: v_or_b32_e32 v2, v5, v6
; GFX11-NEXT: .LBB45_3: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; GFX11-NEXT: s_branch .LBB45_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10950,7 +10544,7 @@ define inreg <12 x i8> @bitcast_v6i16_to_v12i8_scalar(<6 x i16> inreg %a, i32 in
; SI-NEXT: s_lshr_b32 s15, s17, 16
; SI-NEXT: s_lshr_b32 s20, s16, 16
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB47_4
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s20, 16
@@ -11008,23 +10602,12 @@ define inreg <12 x i8> @bitcast_v6i16_to_v12i8_scalar(<6 x i16> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v10, s14
; SI-NEXT: v_mov_b32_e32 v11, s11
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB47_4:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr19
-; SI-NEXT: ; implicit-def: $sgpr21
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: s_branch .LBB47_2
;
; VI-LABEL: bitcast_v6i16_to_v12i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_4
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s19, s16, 8
; VI-NEXT: s_lshr_b32 s10, s18, 16
@@ -11075,23 +10658,12 @@ define inreg <12 x i8> @bitcast_v6i16_to_v12i8_scalar(<6 x i16> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v10, s10
; VI-NEXT: v_mov_b32_e32 v11, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB47_4:
-; VI-NEXT: ; implicit-def: $sgpr19
-; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: ; implicit-def: $sgpr4
-; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: ; implicit-def: $sgpr13
-; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB47_2
;
; GFX9-LABEL: bitcast_v6i16_to_v12i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s19, s16, 8
; GFX9-NEXT: s_lshr_b32 s10, s18, 16
@@ -11102,7 +10674,7 @@ define inreg <12 x i8> @bitcast_v6i16_to_v12i8_scalar(<6 x i16> inreg %a, i32 in
; GFX9-NEXT: s_lshr_b32 s14, s16, 16
; GFX9-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
+; GFX9-NEXT: s_cbranch_execnz .LBB47_3
; GFX9-NEXT: .LBB47_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v8, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, s17, 3 op_sel_hi:[1,0]
@@ -11116,19 +10688,8 @@ define inreg <12 x i8> @bitcast_v6i16_to_v12i8_scalar(<6 x i16> inreg %a, i32 in
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v14
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v13
-; GFX9-NEXT: s_branch .LBB47_5
+; GFX9-NEXT: s_branch .LBB47_4
; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: ; implicit-def: $sgpr19
-; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr4
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB47_2
-; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v13, s16
; GFX9-NEXT: v_mov_b32_e32 v14, s17
; GFX9-NEXT: v_mov_b32_e32 v8, s18
@@ -11141,7 +10702,7 @@ define inreg <12 x i8> @bitcast_v6i16_to_v12i8_scalar(<6 x i16> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v10, s10
; GFX9-NEXT: v_mov_b32_e32 v11, s6
; GFX9-NEXT: v_mov_b32_e32 v3, s4
-; GFX9-NEXT: .LBB47_5: ; %end
+; GFX9-NEXT: .LBB47_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, v13
; GFX9-NEXT: v_mov_b32_e32 v4, v14
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -11151,7 +10712,7 @@ define inreg <12 x i8> @bitcast_v6i16_to_v12i8_scalar(<6 x i16> inreg %a, i32 in
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s8, s2, 16
; GFX11-NEXT: s_lshr_b32 s9, s2, 8
@@ -11163,7 +10724,7 @@ define inreg <12 x i8> @bitcast_v6i16_to_v12i8_scalar(<6 x i16> inreg %a, i32 in
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-NEXT: .LBB47_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v8, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s1, 3 op_sel_hi:[1,0]
@@ -11178,19 +10739,8 @@ define inreg <12 x i8> @bitcast_v6i16_to_v12i8_scalar(<6 x i16> inreg %a, i32 in
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v13
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v13
-; GFX11-NEXT: s_branch .LBB47_5
+; GFX11-NEXT: s_branch .LBB47_4
; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v13, s0 :: v_dual_mov_b32 v14, s1
; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v1, s14
; GFX11-NEXT: v_dual_mov_b32 v2, s12 :: v_dual_mov_b32 v5, s13
@@ -11198,7 +10748,7 @@ define inreg <12 x i8> @bitcast_v6i16_to_v12i8_scalar(<6 x i16> inreg %a, i32 in
; GFX11-NEXT: v_dual_mov_b32 v9, s9 :: v_dual_mov_b32 v10, s8
; GFX11-NEXT: v_mov_b32_e32 v11, s6
; GFX11-NEXT: v_mov_b32_e32 v3, s4
-; GFX11-NEXT: .LBB47_5: ; %end
+; GFX11-NEXT: .LBB47_4: ; %end
; GFX11-NEXT: v_mov_b32_e32 v0, v13
; GFX11-NEXT: v_mov_b32_e32 v4, v14
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -11597,7 +11147,7 @@ define inreg <6 x half> @bitcast_v6bf16_to_v6f16_scalar(<6 x bfloat> inreg %a, i
; SI-NEXT: v_mul_f32_e64 v10, 1.0, s9
; SI-NEXT: v_mul_f32_e64 v15, 1.0, s4
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s5
-; SI-NEXT: s_cbranch_scc0 .LBB49_4
+; SI-NEXT: s_cbranch_scc0 .LBB49_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v14
; SI-NEXT: v_lshr_b64 v[1:2], v[5:6], 16
@@ -11640,20 +11190,14 @@ define inreg <6 x half> @bitcast_v6bf16_to_v6f16_scalar(<6 x bfloat> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB49_4:
-; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: ; implicit-def: $vgpr12
-; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: ; implicit-def: $vgpr8
-; SI-NEXT: s_branch .LBB49_2
;
; VI-LABEL: bitcast_v6bf16_to_v6f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB49_3
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB49_4
+; VI-NEXT: s_cbranch_execnz .LBB49_3
; VI-NEXT: .LBB49_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v3, 0x40c00000
@@ -11713,8 +11257,6 @@ define inreg <6 x half> @bitcast_v6bf16_to_v6f16_scalar(<6 x bfloat> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v1, v3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB49_3:
-; VI-NEXT: s_branch .LBB49_2
-; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -11725,9 +11267,9 @@ define inreg <6 x half> @bitcast_v6bf16_to_v6f16_scalar(<6 x bfloat> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
+; GFX9-NEXT: s_cbranch_execnz .LBB49_3
; GFX9-NEXT: .LBB49_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s16
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -11790,8 +11332,6 @@ define inreg <6 x half> @bitcast_v6bf16_to_v6f16_scalar(<6 x bfloat> inreg %a, i
; GFX9-NEXT: v_lshl_or_b32 v0, v3, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: s_branch .LBB49_2
-; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -11803,10 +11343,10 @@ define inreg <6 x half> @bitcast_v6bf16_to_v6f16_scalar(<6 x bfloat> inreg %a, i
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s0
; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -11872,8 +11412,6 @@ define inreg <6 x half> @bitcast_v6bf16_to_v6f16_scalar(<6 x bfloat> inreg %a, i
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v6.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB49_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB49_2
-; GFX11-TRUE16-NEXT: .LBB49_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -11883,10 +11421,10 @@ define inreg <6 x half> @bitcast_v6bf16_to_v6f16_scalar(<6 x bfloat> inreg %a, i
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s0
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -11956,8 +11494,6 @@ define inreg <6 x half> @bitcast_v6bf16_to_v6f16_scalar(<6 x bfloat> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v4, 16, v5
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB49_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB49_2
-; GFX11-FAKE16-NEXT: .LBB49_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
@@ -12130,7 +11666,7 @@ define inreg <6 x bfloat> @bitcast_v6f16_to_v6bf16_scalar(<6 x half> inreg %a, i
; SI-NEXT: s_lshr_b32 s7, s17, 16
; SI-NEXT: s_lshr_b32 s6, s16, 16
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB51_3
+; SI-NEXT: s_cbranch_scc0 .LBB51_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s9, s16, 16
; SI-NEXT: s_lshl_b32 s10, s6, 16
@@ -12138,7 +11674,7 @@ define inreg <6 x bfloat> @bitcast_v6f16_to_v6bf16_scalar(<6 x half> inreg %a, i
; SI-NEXT: s_lshl_b32 s12, s7, 16
; SI-NEXT: s_lshl_b32 s13, s18, 16
; SI-NEXT: s_lshl_b32 s14, s8, 16
-; SI-NEXT: s_cbranch_execnz .LBB51_4
+; SI-NEXT: s_cbranch_execnz .LBB51_3
; SI-NEXT: .LBB51_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s8
; SI-NEXT: v_cvt_f32_f16_e32 v1, s18
@@ -12164,23 +11700,15 @@ define inreg <6 x bfloat> @bitcast_v6f16_to_v6bf16_scalar(<6 x half> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; SI-NEXT: s_branch .LBB51_5
+; SI-NEXT: s_branch .LBB51_4
; SI-NEXT: .LBB51_3:
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: s_branch .LBB51_2
-; SI-NEXT: .LBB51_4:
; SI-NEXT: v_mov_b32_e32 v4, s14
; SI-NEXT: v_mov_b32_e32 v3, s13
; SI-NEXT: v_mov_b32_e32 v5, s12
; SI-NEXT: v_mov_b32_e32 v2, s11
; SI-NEXT: v_mov_b32_e32 v1, s10
; SI-NEXT: v_mov_b32_e32 v0, s9
-; SI-NEXT: .LBB51_5: ; %end
+; SI-NEXT: .LBB51_4: ; %end
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0
@@ -12199,9 +11727,9 @@ define inreg <6 x bfloat> @bitcast_v6f16_to_v6bf16_scalar(<6 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB51_3
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB51_4
+; VI-NEXT: s_cbranch_execnz .LBB51_3
; VI-NEXT: .LBB51_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -12221,8 +11749,6 @@ define inreg <6 x bfloat> @bitcast_v6f16_to_v6bf16_scalar(<6 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v3, v4
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB51_3:
-; VI-NEXT: s_branch .LBB51_2
-; VI-NEXT: .LBB51_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -12233,9 +11759,9 @@ define inreg <6 x bfloat> @bitcast_v6f16_to_v6bf16_scalar(<6 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_4
+; GFX9-NEXT: s_cbranch_execnz .LBB51_3
; GFX9-NEXT: .LBB51_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
@@ -12243,8 +11769,6 @@ define inreg <6 x bfloat> @bitcast_v6f16_to_v6bf16_scalar(<6 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB51_3:
-; GFX9-NEXT: s_branch .LBB51_2
-; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -12256,18 +11780,16 @@ define inreg <6 x bfloat> @bitcast_v6f16_to_v6bf16_scalar(<6 x half> inreg %a, i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-NEXT: .LBB51_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
-; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -12658,7 +12180,7 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v0, 1.0, s6
; SI-NEXT: v_mul_f32_e64 v10, 1.0, s5
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s4
-; SI-NEXT: s_cbranch_scc0 .LBB53_4
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v11
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v4
@@ -12699,22 +12221,14 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v5
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB53_4:
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: ; implicit-def: $vgpr8
-; SI-NEXT: ; implicit-def: $vgpr6
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: s_branch .LBB53_2
;
; VI-LABEL: bitcast_v6bf16_to_v6i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB53_3
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB53_4
+; VI-NEXT: s_cbranch_execnz .LBB53_3
; VI-NEXT: .LBB53_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v3, 0x40c00000
@@ -12774,8 +12288,6 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v1, v3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB53_3:
-; VI-NEXT: s_branch .LBB53_2
-; VI-NEXT: .LBB53_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -12786,9 +12298,9 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB53_4
+; GFX9-NEXT: s_cbranch_execnz .LBB53_3
; GFX9-NEXT: .LBB53_2: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s16
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
@@ -12848,8 +12360,6 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3
; GFX9-NEXT: v_and_or_b32 v0, v3, v6, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB53_3:
-; GFX9-NEXT: s_branch .LBB53_2
-; GFX9-NEXT: .LBB53_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -12861,10 +12371,10 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s0
; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -12921,8 +12431,6 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v6.h
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB53_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB53_2
-; GFX11-TRUE16-NEXT: .LBB53_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -12932,10 +12440,10 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s0
; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
@@ -12996,8 +12504,6 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3
; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v7
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB53_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB53_2
-; GFX11-FAKE16-NEXT: .LBB53_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
@@ -13157,7 +12663,7 @@ define inreg <6 x bfloat> @bitcast_v6i16_to_v6bf16_scalar(<6 x i16> inreg %a, i3
; SI-NEXT: s_lshr_b32 s13, s17, 16
; SI-NEXT: s_lshr_b32 s12, s16, 16
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB55_4
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s8, s16, 16
; SI-NEXT: s_lshl_b32 s11, s12, 16
@@ -13202,20 +12708,12 @@ define inreg <6 x bfloat> @bitcast_v6i16_to_v6bf16_scalar(<6 x i16> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s6
; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB55_4:
-; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB55_2
;
; VI-LABEL: bitcast_v6i16_to_v6bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_4
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB55_3
; VI-NEXT: .LBB55_2: ; %cmp.true
@@ -13239,24 +12737,20 @@ define inreg <6 x bfloat> @bitcast_v6i16_to_v6bf16_scalar(<6 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB55_4:
-; VI-NEXT: s_branch .LBB55_2
;
; GFX9-LABEL: bitcast_v6i16_to_v6bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
+; GFX9-NEXT: s_cbranch_execnz .LBB55_3
; GFX9-NEXT: .LBB55_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
-; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -13268,18 +12762,16 @@ define inreg <6 x bfloat> @bitcast_v6i16_to_v6bf16_scalar(<6 x i16> inreg %a, i3
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-NEXT: .LBB55_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
-; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -13430,9 +12922,9 @@ define inreg <6 x i16> @bitcast_v6f16_to_v6i16_scalar(<6 x half> inreg %a, i32 i
; SI-NEXT: s_lshr_b32 s6, s17, 16
; SI-NEXT: s_lshr_b32 s8, s16, 16
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB57_3
+; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB57_4
+; SI-NEXT: s_cbranch_execnz .LBB57_3
; SI-NEXT: .LBB57_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, s8
; SI-NEXT: v_cvt_f32_f16_e32 v1, s16
@@ -13459,17 +12951,15 @@ define inreg <6 x i16> @bitcast_v6f16_to_v6i16_scalar(<6 x half> inreg %a, i32 i
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: v_lshr_b64 v[2:3], v[0:1], 16
; SI-NEXT: v_or_b32_e32 v3, v8, v7
-; SI-NEXT: s_branch .LBB57_5
+; SI-NEXT: s_branch .LBB57_4
; SI-NEXT: .LBB57_3:
-; SI-NEXT: s_branch .LBB57_2
-; SI-NEXT: .LBB57_4:
; SI-NEXT: v_mov_b32_e32 v4, s7
; SI-NEXT: v_mov_b32_e32 v6, s6
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v3, s18
; SI-NEXT: v_mov_b32_e32 v5, s16
; SI-NEXT: v_mov_b32_e32 v2, s8
-; SI-NEXT: .LBB57_5: ; %end
+; SI-NEXT: .LBB57_4: ; %end
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v5
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v0, v0, v2
@@ -13485,9 +12975,9 @@ define inreg <6 x i16> @bitcast_v6f16_to_v6i16_scalar(<6 x half> inreg %a, i32 i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_3
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB57_4
+; VI-NEXT: s_cbranch_execnz .LBB57_3
; VI-NEXT: .LBB57_2: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -13507,8 +12997,6 @@ define inreg <6 x i16> @bitcast_v6f16_to_v6i16_scalar(<6 x half> inreg %a, i32 i
; VI-NEXT: v_or_b32_e32 v0, v3, v4
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB57_3:
-; VI-NEXT: s_branch .LBB57_2
-; VI-NEXT: .LBB57_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -13519,9 +13007,9 @@ define inreg <6 x i16> @bitcast_v6f16_to_v6i16_scalar(<6 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
+; GFX9-NEXT: s_cbranch_execnz .LBB57_3
; GFX9-NEXT: .LBB57_2: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
@@ -13529,8 +13017,6 @@ define inreg <6 x i16> @bitcast_v6f16_to_v6i16_scalar(<6 x half> inreg %a, i32 i
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
-; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -13542,18 +13028,16 @@ define inreg <6 x i16> @bitcast_v6f16_to_v6i16_scalar(<6 x half> inreg %a, i32 i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB57_3
; GFX11-NEXT: .LBB57_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB57_3:
-; GFX11-NEXT: s_branch .LBB57_2
-; GFX11-NEXT: .LBB57_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -13715,7 +13199,7 @@ define inreg <6 x half> @bitcast_v6i16_to_v6f16_scalar(<6 x i16> inreg %a, i32 i
; SI-NEXT: s_lshr_b32 s11, s17, 16
; SI-NEXT: s_lshr_b32 s13, s16, 16
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB59_4
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s5, s18, 0xffff
; SI-NEXT: s_lshl_b32 s6, s10, 16
@@ -13762,17 +13246,12 @@ define inreg <6 x half> @bitcast_v6i16_to_v6f16_scalar(<6 x i16> inreg %a, i32 i
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB59_4:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: s_branch .LBB59_2
;
; VI-LABEL: bitcast_v6i16_to_v6f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_4
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_cbranch_execnz .LBB59_3
; VI-NEXT: .LBB59_2: ; %cmp.true
@@ -13796,24 +13275,20 @@ define inreg <6 x half> @bitcast_v6i16_to_v6f16_scalar(<6 x i16> inreg %a, i32 i
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB59_4:
-; VI-NEXT: s_branch .LBB59_2
;
; GFX9-LABEL: bitcast_v6i16_to_v6f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
+; GFX9-NEXT: s_cbranch_execnz .LBB59_3
; GFX9-NEXT: .LBB59_2: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
-; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -13825,18 +13300,16 @@ define inreg <6 x half> @bitcast_v6i16_to_v6f16_scalar(<6 x i16> inreg %a, i32 i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
; GFX11-NEXT: ; %bb.1: ; %Flow
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-NEXT: s_cbranch_vccnz .LBB59_3
; GFX11-NEXT: .LBB59_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
-; GFX11-NEXT: .LBB59_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/bug-vopc-commute.ll b/llvm/test/CodeGen/AMDGPU/bug-vopc-commute.ll
index 21d4bcfcdc8c1..cd86b2bfe5deb 100644
--- a/llvm/test/CodeGen/AMDGPU/bug-vopc-commute.ll
+++ b/llvm/test/CodeGen/AMDGPU/bug-vopc-commute.ll
@@ -22,11 +22,10 @@ define amdgpu_vs float @main(i32 %v) {
; GFX6-NEXT: s_buffer_load_dword s0, s[0:3], 0xf4
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_cmp_eq_u32 s0, 0
-; GFX6-NEXT: s_cbranch_scc0 .LBB0_6
-; GFX6-NEXT: ; %bb.5: ; %ENDIF62
-; GFX6-NEXT: s_branch .LBB0_7
-; GFX6-NEXT: .LBB0_6: ; %IF63
-; GFX6-NEXT: .LBB0_7:
+; GFX6-NEXT: s_cbranch_scc0 .LBB0_5
+; GFX6-NEXT: s_branch .LBB0_6
+; GFX6-NEXT: .LBB0_5: ; %IF63
+; GFX6-NEXT: .LBB0_6:
;
; GFX8-LABEL: main:
; GFX8: ; %bb.0: ; %main_body
@@ -44,11 +43,10 @@ define amdgpu_vs float @main(i32 %v) {
; GFX8-NEXT: s_buffer_load_dword s0, s[0:3], 0x3d0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_cmp_eq_u32 s0, 0
-; GFX8-NEXT: s_cbranch_scc0 .LBB0_6
-; GFX8-NEXT: ; %bb.5: ; %ENDIF62
-; GFX8-NEXT: s_branch .LBB0_7
-; GFX8-NEXT: .LBB0_6: ; %IF63
-; GFX8-NEXT: .LBB0_7:
+; GFX8-NEXT: s_cbranch_scc0 .LBB0_5
+; GFX8-NEXT: s_branch .LBB0_6
+; GFX8-NEXT: .LBB0_5: ; %IF63
+; GFX8-NEXT: .LBB0_6:
main_body:
%d1 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> poison, i32 960, i32 0)
%d2 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> poison, i32 976, i32 0)
diff --git a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
index 87b761268f1f1..f3ebdb4e1c380 100644
--- a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
+++ b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
@@ -1982,7 +1982,7 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; CISI-NEXT: s_waitcnt lgkmcnt(0)
; CISI-NEXT: s_or_b64 s[0:1], s[10:11], s[2:3]
; CISI-NEXT: s_cmp_lg_u32 s1, 0
-; CISI-NEXT: s_cbranch_scc0 .LBB16_4
+; CISI-NEXT: s_cbranch_scc0 .LBB16_2
; CISI-NEXT: ; %bb.1:
; CISI-NEXT: v_cvt_f32_u32_e32 v0, s2
; CISI-NEXT: v_cvt_f32_u32_e32 v1, s3
@@ -2120,9 +2120,6 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; CISI-NEXT: s_mov_b32 s10, -1
; CISI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; CISI-NEXT: s_endpgm
-; CISI-NEXT: .LBB16_4:
-; CISI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; CISI-NEXT: s_branch .LBB16_2
;
; VI-LABEL: sudiv64:
; VI: ; %bb.0:
@@ -2131,7 +2128,7 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_or_b64 s[6:7], s[2:3], s[4:5]
; VI-NEXT: s_cmp_lg_u32 s7, 0
-; VI-NEXT: s_cbranch_scc0 .LBB16_3
+; VI-NEXT: s_cbranch_scc0 .LBB16_2
; VI-NEXT: ; %bb.1:
; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
; VI-NEXT: v_cvt_f32_u32_e32 v1, s5
@@ -2237,7 +2234,7 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; VI-NEXT: s_cmp_lg_u32 s3, 0
; VI-NEXT: s_cselect_b32 s9, s15, s11
; VI-NEXT: s_cselect_b32 s8, s13, s10
-; VI-NEXT: s_cbranch_execnz .LBB16_4
+; VI-NEXT: s_cbranch_execnz .LBB16_3
; VI-NEXT: .LBB16_2:
; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
; VI-NEXT: s_sub_i32 s3, 0, s4
@@ -2262,14 +2259,11 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; VI-NEXT: s_cselect_b64 vcc, -1, 0
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
; VI-NEXT: v_mov_b32_e32 v1, 0
-; VI-NEXT: s_branch .LBB16_5
+; VI-NEXT: s_branch .LBB16_4
; VI-NEXT: .LBB16_3:
-; VI-NEXT: ; implicit-def: $sgpr8_sgpr9
-; VI-NEXT: s_branch .LBB16_2
-; VI-NEXT: .LBB16_4:
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: v_mov_b32_e32 v1, s9
-; VI-NEXT: .LBB16_5:
+; VI-NEXT: .LBB16_4:
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
@@ -2282,7 +2276,7 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_or_b64 s[4:5], s[2:3], s[6:7]
; GFX9-NEXT: s_cmp_lg_u32 s5, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB16_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB16_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s6
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s7
@@ -2421,9 +2415,6 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX9-NEXT: v_mov_b32_e32 v1, s9
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9-NEXT: s_endpgm
-; GFX9-NEXT: .LBB16_4:
-; GFX9-NEXT: ; implicit-def: $sgpr8_sgpr9
-; GFX9-NEXT: s_branch .LBB16_2
;
; GFX1010-LABEL: sudiv64:
; GFX1010: ; %bb.0:
@@ -2434,7 +2425,7 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
; GFX1010-NEXT: s_or_b64 s[4:5], s[2:3], s[6:7]
; GFX1010-NEXT: s_cmp_lg_u32 s5, 0
-; GFX1010-NEXT: s_cbranch_scc0 .LBB16_4
+; GFX1010-NEXT: s_cbranch_scc0 .LBB16_2
; GFX1010-NEXT: ; %bb.1:
; GFX1010-NEXT: v_cvt_f32_u32_e32 v0, s6
; GFX1010-NEXT: v_cvt_f32_u32_e32 v1, s7
@@ -2574,9 +2565,6 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1010-NEXT: v_mov_b32_e32 v1, s5
; GFX1010-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1010-NEXT: s_endpgm
-; GFX1010-NEXT: .LBB16_4:
-; GFX1010-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX1010-NEXT: s_branch .LBB16_2
;
; GFX1030W32-LABEL: sudiv64:
; GFX1030W32: ; %bb.0:
@@ -2587,7 +2575,7 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1030W32-NEXT: s_waitcnt lgkmcnt(0)
; GFX1030W32-NEXT: s_or_b64 s[6:7], s[2:3], s[4:5]
; GFX1030W32-NEXT: s_cmp_lg_u32 s7, 0
-; GFX1030W32-NEXT: s_cbranch_scc0 .LBB16_4
+; GFX1030W32-NEXT: s_cbranch_scc0 .LBB16_2
; GFX1030W32-NEXT: ; %bb.1:
; GFX1030W32-NEXT: v_cvt_f32_u32_e32 v0, s4
; GFX1030W32-NEXT: v_cvt_f32_u32_e32 v1, s5
@@ -2727,9 +2715,6 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1030W32-NEXT: v_mov_b32_e32 v1, s7
; GFX1030W32-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1030W32-NEXT: s_endpgm
-; GFX1030W32-NEXT: .LBB16_4:
-; GFX1030W32-NEXT: ; implicit-def: $sgpr6_sgpr7
-; GFX1030W32-NEXT: s_branch .LBB16_2
;
; GFX1030W64-LABEL: sudiv64:
; GFX1030W64: ; %bb.0:
@@ -2739,7 +2724,7 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1030W64-NEXT: s_waitcnt lgkmcnt(0)
; GFX1030W64-NEXT: s_or_b64 s[6:7], s[2:3], s[4:5]
; GFX1030W64-NEXT: s_cmp_lg_u32 s7, 0
-; GFX1030W64-NEXT: s_cbranch_scc0 .LBB16_4
+; GFX1030W64-NEXT: s_cbranch_scc0 .LBB16_2
; GFX1030W64-NEXT: ; %bb.1:
; GFX1030W64-NEXT: v_cvt_f32_u32_e32 v0, s4
; GFX1030W64-NEXT: v_cvt_f32_u32_e32 v1, s5
@@ -2878,9 +2863,6 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1030W64-NEXT: v_mov_b32_e32 v1, s7
; GFX1030W64-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1030W64-NEXT: s_endpgm
-; GFX1030W64-NEXT: .LBB16_4:
-; GFX1030W64-NEXT: ; implicit-def: $sgpr6_sgpr7
-; GFX1030W64-NEXT: s_branch .LBB16_2
;
; GFX11-LABEL: sudiv64:
; GFX11: ; %bb.0:
@@ -2892,7 +2874,7 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX11-NEXT: s_or_b64 s[6:7], s[2:3], s[4:5]
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_cmp_lg_u32 s7, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB16_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB16_2
; GFX11-NEXT: ; %bb.1:
; GFX11-NEXT: v_cvt_f32_u32_e32 v0, s4
; GFX11-NEXT: v_cvt_f32_u32_e32 v1, s5
@@ -3046,9 +3028,6 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s7
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_endpgm
-; GFX11-NEXT: .LBB16_4:
-; GFX11-NEXT: ; implicit-def: $sgpr6_sgpr7
-; GFX11-NEXT: s_branch .LBB16_2
;
; GFX1250-LABEL: sudiv64:
; GFX1250: ; %bb.0:
@@ -3061,7 +3040,7 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_cmp_lg_u32 s5, 0
; GFX1250-NEXT: s_mov_b32 s5, 0
-; GFX1250-NEXT: s_cbranch_scc0 .LBB16_4
+; GFX1250-NEXT: s_cbranch_scc0 .LBB16_2
; GFX1250-NEXT: ; %bb.1:
; GFX1250-NEXT: s_cvt_f32_u32 s4, s6
; GFX1250-NEXT: s_cvt_f32_u32 s8, s7
@@ -3198,9 +3177,6 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1250-NEXT: s_endpgm
-; GFX1250-NEXT: .LBB16_4:
-; GFX1250-NEXT: ; implicit-def: $sgpr8_sgpr9
-; GFX1250-NEXT: s_branch .LBB16_2
%result = udiv i64 %x, %y
store i64 %result, ptr addrspace(1) %out
ret void
@@ -3221,5 +3197,3 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GCN-ISEL: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/ctpop16.ll b/llvm/test/CodeGen/AMDGPU/ctpop16.ll
index fca57be5764f8..59f829a8d345b 100644
--- a/llvm/test/CodeGen/AMDGPU/ctpop16.ll
+++ b/llvm/test/CodeGen/AMDGPU/ctpop16.ll
@@ -1491,7 +1491,7 @@ define amdgpu_kernel void @ctpop_i16_in_br(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: s_cbranch_scc0 .LBB14_4
+; SI-NEXT: s_cbranch_scc0 .LBB14_2
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
@@ -1510,9 +1510,6 @@ define amdgpu_kernel void @ctpop_i16_in_br(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
-; SI-NEXT: .LBB14_4:
-; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: s_branch .LBB14_2
;
; VI-LABEL: ctpop_i16_in_br:
; VI: ; %bb.0: ; %entry
@@ -1520,7 +1517,7 @@ define amdgpu_kernel void @ctpop_i16_in_br(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s4, s6, 16
-; VI-NEXT: s_cbranch_scc0 .LBB14_4
+; VI-NEXT: s_cbranch_scc0 .LBB14_2
; VI-NEXT: ; %bb.1: ; %else
; VI-NEXT: s_mov_b32 s11, 0xf000
; VI-NEXT: s_mov_b32 s10, -1
@@ -1539,9 +1536,6 @@ define amdgpu_kernel void @ctpop_i16_in_br(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_short v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
-; VI-NEXT: .LBB14_4:
-; VI-NEXT: ; implicit-def: $vgpr0
-; VI-NEXT: s_branch .LBB14_2
;
; EG-LABEL: ctpop_i16_in_br:
; EG: ; %bb.0: ; %entry
diff --git a/llvm/test/CodeGen/AMDGPU/ctpop64.ll b/llvm/test/CodeGen/AMDGPU/ctpop64.ll
index 37f5889918c41..5d92df4900abe 100644
--- a/llvm/test/CodeGen/AMDGPU/ctpop64.ll
+++ b/llvm/test/CodeGen/AMDGPU/ctpop64.ll
@@ -339,7 +339,7 @@ define amdgpu_kernel void @ctpop_i64_in_br(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s8, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x2
; SI-NEXT: s_mov_b64 s[2:3], 0
@@ -357,9 +357,6 @@ define amdgpu_kernel void @ctpop_i64_in_br(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: ctpop_i64_in_br:
; VI: ; %bb.0: ; %entry
@@ -368,7 +365,7 @@ define amdgpu_kernel void @ctpop_i64_in_br(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s8, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %else
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x8
; VI-NEXT: s_cbranch_execnz .LBB7_3
@@ -384,9 +381,6 @@ define amdgpu_kernel void @ctpop_i64_in_br(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; VI-NEXT: s_branch .LBB7_2
entry:
%tmp0 = icmp eq i32 %cond, 0
br i1 %tmp0, label %if, label %else
diff --git a/llvm/test/CodeGen/AMDGPU/divergent-branch-uniform-condition.ll b/llvm/test/CodeGen/AMDGPU/divergent-branch-uniform-condition.ll
index 92fb345851479..aefccf2a255f1 100644
--- a/llvm/test/CodeGen/AMDGPU/divergent-branch-uniform-condition.ll
+++ b/llvm/test/CodeGen/AMDGPU/divergent-branch-uniform-condition.ll
@@ -40,13 +40,13 @@ define amdgpu_ps void @main(i32 %0, float %1) {
; ISA-NEXT: s_and_b64 s[6:7], s[6:7], exec
; ISA-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
; ISA-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; ISA-NEXT: s_cbranch_execz .LBB0_7
+; ISA-NEXT: s_cbranch_execz .LBB0_6
; ISA-NEXT: .LBB0_3: ; %loop
; ISA-NEXT: ; =>This Inner Loop Header: Depth=1
; ISA-NEXT: s_or_b64 s[4:5], s[4:5], exec
; ISA-NEXT: s_cmp_lt_u32 s8, 32
; ISA-NEXT: s_mov_b64 s[6:7], -1
-; ISA-NEXT: s_cbranch_scc0 .LBB0_6
+; ISA-NEXT: s_cbranch_scc0 .LBB0_2
; ISA-NEXT: ; %bb.4: ; %endif1
; ISA-NEXT: ; in Loop: Header=BB0_3 Depth=1
; ISA-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -56,16 +56,13 @@ define amdgpu_ps void @main(i32 %0, float %1) {
; ISA-NEXT: s_add_i32 s8, s8, 1
; ISA-NEXT: s_xor_b64 s[6:7], exec, -1
; ISA-NEXT: s_branch .LBB0_1
-; ISA-NEXT: .LBB0_6: ; in Loop: Header=BB0_3 Depth=1
-; ISA-NEXT: ; implicit-def: $sgpr8
-; ISA-NEXT: s_branch .LBB0_2
-; ISA-NEXT: .LBB0_7: ; %Flow2
+; ISA-NEXT: .LBB0_6: ; %Flow2
; ISA-NEXT: s_or_b64 exec, exec, s[0:1]
; ISA-NEXT: v_mov_b32_e32 v1, 0
; ISA-NEXT: s_and_saveexec_b64 s[0:1], s[2:3]
-; ISA-NEXT: ; %bb.8: ; %if1
+; ISA-NEXT: ; %bb.7: ; %if1
; ISA-NEXT: v_sqrt_f32_e32 v1, v0
-; ISA-NEXT: ; %bb.9: ; %endloop
+; ISA-NEXT: ; %bb.8: ; %endloop
; ISA-NEXT: s_or_b64 exec, exec, s[0:1]
; ISA-NEXT: exp mrt0, v1, v1, v1, v1 done vm
; ISA-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc.ll b/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc.ll
index d19a260db3550..5ccebf13a163b 100644
--- a/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc.ll
+++ b/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc.ll
@@ -831,7 +831,7 @@ define amdgpu_kernel void @test_dynamic_stackalloc_kernel_control_flow(i32 %n, i
; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-SDAG-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-SDAG-NEXT: s_mov_b32 s4, 0
-; GFX9-SDAG-NEXT: s_cbranch_scc0 .LBB7_6
+; GFX9-SDAG-NEXT: s_cbranch_scc0 .LBB7_4
; GFX9-SDAG-NEXT: ; %bb.1: ; %bb.1
; GFX9-SDAG-NEXT: v_lshl_add_u32 v0, v0, 2, 15
; GFX9-SDAG-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
@@ -865,8 +865,6 @@ define amdgpu_kernel void @test_dynamic_stackalloc_kernel_control_flow(i32 %n, i
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
; GFX9-SDAG-NEXT: .LBB7_5: ; %bb.2
; GFX9-SDAG-NEXT: s_endpgm
-; GFX9-SDAG-NEXT: .LBB7_6:
-; GFX9-SDAG-NEXT: s_branch .LBB7_4
;
; GFX9-GISEL-LABEL: test_dynamic_stackalloc_kernel_control_flow:
; GFX9-GISEL: ; %bb.0: ; %entry
@@ -926,7 +924,7 @@ define amdgpu_kernel void @test_dynamic_stackalloc_kernel_control_flow(i32 %n, i
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-NEXT: s_cmp_lg_u32 s0, 0
; GFX11-SDAG-NEXT: s_mov_b32 s0, 0
-; GFX11-SDAG-NEXT: s_cbranch_scc0 .LBB7_6
+; GFX11-SDAG-NEXT: s_cbranch_scc0 .LBB7_4
; GFX11-SDAG-NEXT: ; %bb.1: ; %bb.1
; GFX11-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX11-SDAG-NEXT: s_mov_b32 s2, exec_lo
@@ -961,8 +959,6 @@ define amdgpu_kernel void @test_dynamic_stackalloc_kernel_control_flow(i32 %n, i
; GFX11-SDAG-NEXT: s_add_i32 s32, s0, s1
; GFX11-SDAG-NEXT: .LBB7_5: ; %bb.2
; GFX11-SDAG-NEXT: s_endpgm
-; GFX11-SDAG-NEXT: .LBB7_6:
-; GFX11-SDAG-NEXT: s_branch .LBB7_4
;
; GFX11-GISEL-LABEL: test_dynamic_stackalloc_kernel_control_flow:
; GFX11-GISEL: ; %bb.0: ; %entry
diff --git a/llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll b/llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll
index d879ebede164e..fbbd6201f7b7b 100644
--- a/llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll
@@ -92,7 +92,7 @@ define <4 x i16> @vec_8xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1)
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s16, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB0_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB0_2
; GFX9-NEXT: ; %bb.1: ; %F
; GFX9-NEXT: global_load_dwordx4 v[2:5], v[2:3], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -112,16 +112,13 @@ define <4 x i16> @vec_8xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1)
; GFX9-NEXT: v_perm_b32 v0, v0, v2, s4
; GFX9-NEXT: v_perm_b32 v1, v3, v1, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB0_4:
-; GFX9-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
-; GFX9-NEXT: s_branch .LBB0_2
;
; GFX11-TRUE16-LABEL: vec_8xi16_extract_4xi16:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s0, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB0_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB0_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %F
; GFX11-TRUE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
@@ -140,16 +137,13 @@ define <4 x i16> @vec_8xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1)
; GFX11-TRUE16-NEXT: v_or_b16 v0.h, 0x8000, v2.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v1.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB0_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
-; GFX11-TRUE16-NEXT: s_branch .LBB0_2
;
; GFX11-FAKE16-LABEL: vec_8xi16_extract_4xi16:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s0, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB0_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB0_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %F
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
@@ -173,9 +167,6 @@ define <4 x i16> @vec_8xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1)
; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
; GFX11-FAKE16-NEXT: v_perm_b32 v1, v3, v1, 0x5040100
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB0_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
-; GFX11-FAKE16-NEXT: s_branch .LBB0_2
%cond = icmp eq i32 %cond.arg, 0
br i1 %cond, label %T, label %F
@@ -284,7 +275,7 @@ define <4 x i16> @vec_8xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace(
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s16, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %F
; GFX9-NEXT: global_load_dwordx4 v[2:5], v[2:3], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -304,16 +295,13 @@ define <4 x i16> @vec_8xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace(
; GFX9-NEXT: v_perm_b32 v0, v0, v3, s4
; GFX9-NEXT: v_perm_b32 v1, v2, v1, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-TRUE16-LABEL: vec_8xi16_extract_4xi16_2:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s0, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %F
; GFX11-TRUE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
@@ -332,16 +320,13 @@ define <4 x i16> @vec_8xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace(
; GFX11-TRUE16-NEXT: v_or_b16 v0.h, 0x8000, v0.h
; GFX11-TRUE16-NEXT: v_or_b16 v1.h, 0x8000, v1.h
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB1_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
-; GFX11-TRUE16-NEXT: s_branch .LBB1_2
;
; GFX11-FAKE16-LABEL: vec_8xi16_extract_4xi16_2:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s0, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %F
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
@@ -365,9 +350,6 @@ define <4 x i16> @vec_8xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace(
; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
; GFX11-FAKE16-NEXT: v_perm_b32 v1, v3, v1, 0x5040100
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB1_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
-; GFX11-FAKE16-NEXT: s_branch .LBB1_2
%cond = icmp eq i32 %cond.arg, 0
br i1 %cond, label %T, label %F
@@ -473,7 +455,7 @@ define <4 x half> @vec_8xf16_extract_4xf16(ptr addrspace(1) %p0, ptr addrspace(1
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s16, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB2_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB2_2
; GFX9-NEXT: ; %bb.1: ; %F
; GFX9-NEXT: global_load_dwordx4 v[2:5], v[2:3], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -496,16 +478,13 @@ define <4 x half> @vec_8xf16_extract_4xf16(ptr addrspace(1) %p0, ptr addrspace(1
; GFX9-NEXT: v_pack_b32_f16 v1, v0, v5
; GFX9-NEXT: v_pack_b32_f16 v0, v4, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB2_4:
-; GFX9-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
-; GFX9-NEXT: s_branch .LBB2_2
;
; GFX11-TRUE16-LABEL: vec_8xf16_extract_4xf16:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s0, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB2_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB2_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %F
; GFX11-TRUE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
@@ -527,16 +506,13 @@ define <4 x half> @vec_8xf16_extract_4xf16(ptr addrspace(1) %p0, ptr addrspace(1
; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, 0x3d00, v1.l, s1
; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v1.l, 0x3d00, s2
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB2_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
-; GFX11-TRUE16-NEXT: s_branch .LBB2_2
;
; GFX11-FAKE16-LABEL: vec_8xf16_extract_4xf16:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s0, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB2_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB2_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %F
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
@@ -561,9 +537,6 @@ define <4 x half> @vec_8xf16_extract_4xf16(ptr addrspace(1) %p0, ptr addrspace(1
; GFX11-FAKE16-NEXT: v_pack_b32_f16 v0, v2, v1
; GFX11-FAKE16-NEXT: v_pack_b32_f16 v1, v3, v4
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB2_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
-; GFX11-FAKE16-NEXT: s_branch .LBB2_2
%cond = icmp eq i32 %cond.arg, 0
br i1 %cond, label %T, label %F
@@ -704,7 +677,7 @@ define <4 x i16> @vec_16xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s16, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %F
; GFX9-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -730,16 +703,13 @@ define <4 x i16> @vec_16xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1
; GFX9-NEXT: v_perm_b32 v0, v0, v3, s4
; GFX9-NEXT: v_perm_b32 v1, v2, v1, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_4:
-; GFX9-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11
-; GFX9-NEXT: s_branch .LBB3_2
;
; GFX11-TRUE16-LABEL: vec_16xi16_extract_4xi16:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s0, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB3_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %F
; GFX11-TRUE16-NEXT: global_load_b128 v[4:7], v[2:3], off offset:16 glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
@@ -762,16 +732,13 @@ define <4 x i16> @vec_16xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1
; GFX11-TRUE16-NEXT: v_or_b16 v0.h, 0x8000, v2.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v1.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB3_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX11-TRUE16-NEXT: s_branch .LBB3_2
;
; GFX11-FAKE16-LABEL: vec_16xi16_extract_4xi16:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s0, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB3_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB3_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %F
; GFX11-FAKE16-NEXT: global_load_b128 v[4:7], v[2:3], off offset:16 glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
@@ -799,9 +766,6 @@ define <4 x i16> @vec_16xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1
; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
; GFX11-FAKE16-NEXT: v_perm_b32 v1, v3, v1, 0x5040100
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB3_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX11-FAKE16-NEXT: s_branch .LBB3_2
%cond = icmp eq i32 %cond.arg, 0
br i1 %cond, label %T, label %F
@@ -943,7 +907,7 @@ define <4 x i16> @vec_16xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s16, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB4_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB4_2
; GFX9-NEXT: ; %bb.1: ; %F
; GFX9-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -969,16 +933,13 @@ define <4 x i16> @vec_16xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace
; GFX9-NEXT: v_perm_b32 v0, v0, v3, s4
; GFX9-NEXT: v_perm_b32 v1, v2, v1, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB4_4:
-; GFX9-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11
-; GFX9-NEXT: s_branch .LBB4_2
;
; GFX11-TRUE16-LABEL: vec_16xi16_extract_4xi16_2:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s0, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB4_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB4_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %F
; GFX11-TRUE16-NEXT: global_load_b128 v[4:7], v[2:3], off offset:16 glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
@@ -1001,16 +962,13 @@ define <4 x i16> @vec_16xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace
; GFX11-TRUE16-NEXT: v_or_b16 v0.h, 0x8000, v0.h
; GFX11-TRUE16-NEXT: v_or_b16 v1.h, 0x8000, v1.h
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB4_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX11-TRUE16-NEXT: s_branch .LBB4_2
;
; GFX11-FAKE16-LABEL: vec_16xi16_extract_4xi16_2:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s0, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB4_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB4_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %F
; GFX11-FAKE16-NEXT: global_load_b128 v[4:7], v[2:3], off offset:16 glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
@@ -1038,9 +996,6 @@ define <4 x i16> @vec_16xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace
; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
; GFX11-FAKE16-NEXT: v_perm_b32 v1, v3, v1, 0x5040100
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB4_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX11-FAKE16-NEXT: s_branch .LBB4_2
%cond = icmp eq i32 %cond.arg, 0
br i1 %cond, label %T, label %F
@@ -1179,7 +1134,7 @@ define <4 x half> @vec_16xf16_extract_4xf16(ptr addrspace(1) %p0, ptr addrspace(
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s16, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %F
; GFX9-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -1208,16 +1163,13 @@ define <4 x half> @vec_16xf16_extract_4xf16(ptr addrspace(1) %p0, ptr addrspace(
; GFX9-NEXT: v_pack_b32_f16 v1, v0, v4
; GFX9-NEXT: v_pack_b32_f16 v0, v2, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-TRUE16-LABEL: vec_16xf16_extract_4xf16:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s0, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %F
; GFX11-TRUE16-NEXT: global_load_b128 v[4:7], v[2:3], off offset:16 glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
@@ -1243,16 +1195,13 @@ define <4 x half> @vec_16xf16_extract_4xf16(ptr addrspace(1) %p0, ptr addrspace(
; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, 0x3d00, v1.l, s1
; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v1.l, 0x3d00, s2
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB5_4:
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX11-TRUE16-NEXT: s_branch .LBB5_2
;
; GFX11-FAKE16-LABEL: vec_16xf16_extract_4xf16:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s0, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB5_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %F
; GFX11-FAKE16-NEXT: global_load_b128 v[4:7], v[2:3], off offset:16 glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
@@ -1281,9 +1230,6 @@ define <4 x half> @vec_16xf16_extract_4xf16(ptr addrspace(1) %p0, ptr addrspace(
; GFX11-FAKE16-NEXT: v_pack_b32_f16 v0, v2, v1
; GFX11-FAKE16-NEXT: v_pack_b32_f16 v1, v3, v4
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB5_4:
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX11-FAKE16-NEXT: s_branch .LBB5_2
%cond = icmp eq i32 %cond.arg, 0
br i1 %cond, label %T, label %F
diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
index a6fd0b0b0b2d2..871de8988c614 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
@@ -11926,27 +11926,24 @@ define double @flat_atomic_fadd_f64_saddr_rtn(ptr inreg %ptr, double %data) {
; GFX1250-SDAG-NEXT: s_cmp_eq_u32 s1, s3
; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0
; GFX1250-SDAG-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX1250-SDAG-NEXT: s_cbranch_vccz .LBB110_3
+; GFX1250-SDAG-NEXT: s_cbranch_vccz .LBB110_6
; GFX1250-SDAG-NEXT: ; %bb.1: ; %atomicrmw.check.private
; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000
; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0
; GFX1250-SDAG-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX1250-SDAG-NEXT: s_cbranch_vccz .LBB110_4
+; GFX1250-SDAG-NEXT: s_cbranch_vccz .LBB110_3
; GFX1250-SDAG-NEXT: ; %bb.2: ; %atomicrmw.global
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0
; GFX1250-SDAG-NEXT: global_atomic_add_f64 v[2:3], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB110_5
-; GFX1250-SDAG-NEXT: s_branch .LBB110_6
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB110_4
+; GFX1250-SDAG-NEXT: s_branch .LBB110_5
; GFX1250-SDAG-NEXT: .LBB110_3:
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX1250-SDAG-NEXT: s_branch .LBB110_7
-; GFX1250-SDAG-NEXT: .LBB110_4:
-; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX1250-SDAG-NEXT: .LBB110_5: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: .LBB110_4: ; %atomicrmw.private
; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1250-SDAG-NEXT: s_cselect_b32 s2, s2, -1
@@ -11954,10 +11951,10 @@ define double @flat_atomic_fadd_f64_saddr_rtn(ptr inreg %ptr, double %data) {
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_add_f64_e32 v[4:5], v[2:3], v[0:1]
; GFX1250-SDAG-NEXT: scratch_store_b64 off, v[4:5], s2
-; GFX1250-SDAG-NEXT: .LBB110_6: ; %Flow1
+; GFX1250-SDAG-NEXT: .LBB110_5: ; %Flow1
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
-; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB110_8
-; GFX1250-SDAG-NEXT: .LBB110_7: ; %atomicrmw.shared
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB110_7
+; GFX1250-SDAG-NEXT: .LBB110_6: ; %atomicrmw.shared
; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1250-SDAG-NEXT: s_cselect_b32 s0, s0, -1
; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -11965,7 +11962,7 @@ define double @flat_atomic_fadd_f64_saddr_rtn(ptr inreg %ptr, double %data) {
; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0
; GFX1250-SDAG-NEXT: ds_add_rtn_f64 v[2:3], v2, v[0:1]
; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
-; GFX1250-SDAG-NEXT: .LBB110_8: ; %atomicrmw.end
+; GFX1250-SDAG-NEXT: .LBB110_7: ; %atomicrmw.end
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, v2 :: v_dual_mov_b32 v1, v3
; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
;
@@ -12035,40 +12032,37 @@ define double @flat_atomic_fadd_f64_saddr_rtn(ptr inreg %ptr, double %data) {
; GFX950-SDAG-NEXT: s_cmp_eq_u32 s1, s3
; GFX950-SDAG-NEXT: s_cselect_b64 s[2:3], -1, 0
; GFX950-SDAG-NEXT: s_andn2_b64 vcc, exec, s[2:3]
-; GFX950-SDAG-NEXT: s_cbranch_vccz .LBB110_3
+; GFX950-SDAG-NEXT: s_cbranch_vccz .LBB110_6
; GFX950-SDAG-NEXT: ; %bb.1: ; %atomicrmw.check.private
; GFX950-SDAG-NEXT: s_mov_b64 s[2:3], src_private_base
; GFX950-SDAG-NEXT: s_cmp_eq_u32 s1, s3
; GFX950-SDAG-NEXT: s_cselect_b64 s[2:3], -1, 0
; GFX950-SDAG-NEXT: s_andn2_b64 vcc, exec, s[2:3]
-; GFX950-SDAG-NEXT: s_cbranch_vccz .LBB110_4
+; GFX950-SDAG-NEXT: s_cbranch_vccz .LBB110_3
; GFX950-SDAG-NEXT: ; %bb.2: ; %atomicrmw.global
; GFX950-SDAG-NEXT: v_mov_b32_e32 v2, 0
; GFX950-SDAG-NEXT: global_atomic_add_f64 v[2:3], v2, v[0:1], s[0:1] sc0
-; GFX950-SDAG-NEXT: s_cbranch_execz .LBB110_5
-; GFX950-SDAG-NEXT: s_branch .LBB110_6
+; GFX950-SDAG-NEXT: s_cbranch_execz .LBB110_4
+; GFX950-SDAG-NEXT: s_branch .LBB110_5
; GFX950-SDAG-NEXT: .LBB110_3:
; GFX950-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX950-SDAG-NEXT: s_branch .LBB110_7
-; GFX950-SDAG-NEXT: .LBB110_4:
-; GFX950-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX950-SDAG-NEXT: .LBB110_5: ; %atomicrmw.private
+; GFX950-SDAG-NEXT: .LBB110_4: ; %atomicrmw.private
; GFX950-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX950-SDAG-NEXT: s_cselect_b32 s2, s0, -1
; GFX950-SDAG-NEXT: scratch_load_dwordx2 v[2:3], off, s2
; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0)
; GFX950-SDAG-NEXT: v_add_f64 v[4:5], v[2:3], v[0:1]
; GFX950-SDAG-NEXT: scratch_store_dwordx2 off, v[4:5], s2
-; GFX950-SDAG-NEXT: .LBB110_6: ; %Flow1
-; GFX950-SDAG-NEXT: s_cbranch_execnz .LBB110_8
-; GFX950-SDAG-NEXT: .LBB110_7: ; %atomicrmw.shared
+; GFX950-SDAG-NEXT: .LBB110_5: ; %Flow1
+; GFX950-SDAG-NEXT: s_cbranch_execnz .LBB110_7
+; GFX950-SDAG-NEXT: .LBB110_6: ; %atomicrmw.shared
; GFX950-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX950-SDAG-NEXT: s_cselect_b32 s0, s0, -1
; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0)
; GFX950-SDAG-NEXT: v_mov_b32_e32 v2, s0
; GFX950-SDAG-NEXT: ds_add_rtn_f64 v[2:3], v2, v[0:1]
; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX950-SDAG-NEXT: .LBB110_8: ; %atomicrmw.end
+; GFX950-SDAG-NEXT: .LBB110_7: ; %atomicrmw.end
; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0)
; GFX950-SDAG-NEXT: v_mov_b32_e32 v0, v2
; GFX950-SDAG-NEXT: v_mov_b32_e32 v1, v3
diff --git a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
index 8fcf1ad3fbc95..321bd42fef503 100644
--- a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
+++ b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
@@ -7297,7 +7297,7 @@ define amdgpu_kernel void @extract_adjacent_blocks(i32 %arg) {
; SI-MOVREL-NEXT: s_load_dword s0, s[4:5], 0x9
; SI-MOVREL-NEXT: s_waitcnt lgkmcnt(0)
; SI-MOVREL-NEXT: s_cmp_lg_u32 s0, 0
-; SI-MOVREL-NEXT: s_cbranch_scc0 .LBB19_4
+; SI-MOVREL-NEXT: s_cbranch_scc0 .LBB19_2
; SI-MOVREL-NEXT: ; %bb.1: ; %bb4
; SI-MOVREL-NEXT: s_mov_b32 s3, 0xf000
; SI-MOVREL-NEXT: s_mov_b32 s2, -1
@@ -7321,15 +7321,13 @@ define amdgpu_kernel void @extract_adjacent_blocks(i32 %arg) {
; SI-MOVREL-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-MOVREL-NEXT: s_waitcnt vmcnt(0)
; SI-MOVREL-NEXT: s_endpgm
-; SI-MOVREL-NEXT: .LBB19_4:
-; SI-MOVREL-NEXT: s_branch .LBB19_2
;
; VI-LABEL: extract_adjacent_blocks:
; VI: ; %bb.0: ; %bb
; VI-NEXT: s_load_dword s0, s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s0, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_4
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %bb4
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
@@ -7347,15 +7345,13 @@ define amdgpu_kernel void @extract_adjacent_blocks(i32 %arg) {
; VI-NEXT: flat_store_dword v[0:1], v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
-; VI-NEXT: .LBB19_4:
-; VI-NEXT: s_branch .LBB19_2
;
; GFX9-IDXMODE-LABEL: extract_adjacent_blocks:
; GFX9-IDXMODE: ; %bb.0: ; %bb
; GFX9-IDXMODE-NEXT: s_load_dword s0, s[4:5], 0x24
; GFX9-IDXMODE-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-IDXMODE-NEXT: s_cmp_lg_u32 s0, 0
-; GFX9-IDXMODE-NEXT: s_cbranch_scc0 .LBB19_4
+; GFX9-IDXMODE-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-IDXMODE-NEXT: ; %bb.1: ; %bb4
; GFX9-IDXMODE-NEXT: global_load_dwordx4 v[0:3], v[0:1], off glc
; GFX9-IDXMODE-NEXT: s_waitcnt vmcnt(0)
@@ -7373,8 +7369,6 @@ define amdgpu_kernel void @extract_adjacent_blocks(i32 %arg) {
; GFX9-IDXMODE-NEXT: global_store_dword v[0:1], v0, off
; GFX9-IDXMODE-NEXT: s_waitcnt vmcnt(0)
; GFX9-IDXMODE-NEXT: s_endpgm
-; GFX9-IDXMODE-NEXT: .LBB19_4:
-; GFX9-IDXMODE-NEXT: s_branch .LBB19_2
bb:
%tmp = icmp eq i32 %arg, 0
br i1 %tmp, label %bb1, label %bb4
@@ -7542,7 +7536,7 @@ define amdgpu_kernel void @insert_adjacent_blocks(i32 %arg, float %val0) {
; SI-MOVREL-NEXT: s_load_dword s0, s[4:5], 0x9
; SI-MOVREL-NEXT: s_waitcnt lgkmcnt(0)
; SI-MOVREL-NEXT: s_cmp_lg_u32 s0, 0
-; SI-MOVREL-NEXT: s_cbranch_scc0 .LBB20_4
+; SI-MOVREL-NEXT: s_cbranch_scc0 .LBB20_2
; SI-MOVREL-NEXT: ; %bb.1: ; %bb4
; SI-MOVREL-NEXT: s_mov_b32 s3, 0xf000
; SI-MOVREL-NEXT: s_mov_b32 s2, -1
@@ -7566,15 +7560,13 @@ define amdgpu_kernel void @insert_adjacent_blocks(i32 %arg, float %val0) {
; SI-MOVREL-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; SI-MOVREL-NEXT: s_waitcnt vmcnt(0)
; SI-MOVREL-NEXT: s_endpgm
-; SI-MOVREL-NEXT: .LBB20_4:
-; SI-MOVREL-NEXT: s_branch .LBB20_2
;
; VI-LABEL: insert_adjacent_blocks:
; VI: ; %bb.0: ; %bb
; VI-NEXT: s_load_dword s0, s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s0, 0
-; VI-NEXT: s_cbranch_scc0 .LBB20_4
+; VI-NEXT: s_cbranch_scc0 .LBB20_2
; VI-NEXT: ; %bb.1: ; %bb4
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
@@ -7592,15 +7584,13 @@ define amdgpu_kernel void @insert_adjacent_blocks(i32 %arg, float %val0) {
; VI-NEXT: flat_store_dwordx4 v[0:1], v[0:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
-; VI-NEXT: .LBB20_4:
-; VI-NEXT: s_branch .LBB20_2
;
; GFX9-IDXMODE-LABEL: insert_adjacent_blocks:
; GFX9-IDXMODE: ; %bb.0: ; %bb
; GFX9-IDXMODE-NEXT: s_load_dword s0, s[4:5], 0x24
; GFX9-IDXMODE-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-IDXMODE-NEXT: s_cmp_lg_u32 s0, 0
-; GFX9-IDXMODE-NEXT: s_cbranch_scc0 .LBB20_4
+; GFX9-IDXMODE-NEXT: s_cbranch_scc0 .LBB20_2
; GFX9-IDXMODE-NEXT: ; %bb.1: ; %bb4
; GFX9-IDXMODE-NEXT: global_load_dwordx4 v[0:3], v[0:1], off glc
; GFX9-IDXMODE-NEXT: s_waitcnt vmcnt(0)
@@ -7618,8 +7608,6 @@ define amdgpu_kernel void @insert_adjacent_blocks(i32 %arg, float %val0) {
; GFX9-IDXMODE-NEXT: global_store_dwordx4 v[0:1], v[0:3], off
; GFX9-IDXMODE-NEXT: s_waitcnt vmcnt(0)
; GFX9-IDXMODE-NEXT: s_endpgm
-; GFX9-IDXMODE-NEXT: .LBB20_4:
-; GFX9-IDXMODE-NEXT: s_branch .LBB20_2
bb:
%tmp = icmp eq i32 %arg, 0
br i1 %tmp, label %bb1, label %bb4
@@ -9226,24 +9214,21 @@ define amdgpu_kernel void @broken_phi_bb(i32 %arg, i32 %arg1) {
; SI-MOVREL-NEXT: v_mov_b32_e32 v0, 8
; SI-MOVREL-NEXT: s_mov_b32 s3, 0xf000
; SI-MOVREL-NEXT: s_mov_b32 s2, -1
-; SI-MOVREL-NEXT: s_branch .LBB26_2
-; SI-MOVREL-NEXT: .LBB26_1:
-; SI-MOVREL-NEXT: ; implicit-def: $vgpr0
-; SI-MOVREL-NEXT: s_branch .LBB26_6
-; SI-MOVREL-NEXT: .LBB26_2: ; %bb2
+; SI-MOVREL-NEXT: s_branch .LBB26_1
+; SI-MOVREL-NEXT: .LBB26_1: ; %bb2
; SI-MOVREL-NEXT: ; =>This Loop Header: Depth=1
-; SI-MOVREL-NEXT: ; Child Loop BB26_4 Depth 2
+; SI-MOVREL-NEXT: ; Child Loop BB26_3 Depth 2
; SI-MOVREL-NEXT: s_waitcnt lgkmcnt(0)
; SI-MOVREL-NEXT: v_cmp_le_i32_e32 vcc, s0, v0
-; SI-MOVREL-NEXT: s_cbranch_vccnz .LBB26_1
-; SI-MOVREL-NEXT: ; %bb.3: ; %bb4
-; SI-MOVREL-NEXT: ; in Loop: Header=BB26_2 Depth=1
+; SI-MOVREL-NEXT: s_cbranch_vccnz .LBB26_5
+; SI-MOVREL-NEXT: ; %bb.2: ; %bb4
+; SI-MOVREL-NEXT: ; in Loop: Header=BB26_1 Depth=1
; SI-MOVREL-NEXT: buffer_load_dword v16, off, s[0:3], 0 glc
; SI-MOVREL-NEXT: s_waitcnt vmcnt(0)
; SI-MOVREL-NEXT: v_mov_b32_e32 v17, s1
; SI-MOVREL-NEXT: s_mov_b64 s[4:5], exec
; SI-MOVREL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-MOVREL-NEXT: .LBB26_4: ; Parent Loop BB26_2 Depth=1
+; SI-MOVREL-NEXT: .LBB26_3: ; Parent Loop BB26_1 Depth=1
; SI-MOVREL-NEXT: ; => This Inner Loop Header: Depth=2
; SI-MOVREL-NEXT: v_readfirstlane_b32 s6, v16
; SI-MOVREL-NEXT: v_cmp_eq_u32_e32 vcc, s6, v16
@@ -9251,35 +9236,32 @@ define amdgpu_kernel void @broken_phi_bb(i32 %arg, i32 %arg1) {
; SI-MOVREL-NEXT: s_mov_b32 m0, s6
; SI-MOVREL-NEXT: v_movreld_b32_e32 v0, v17
; SI-MOVREL-NEXT: s_xor_b64 exec, exec, vcc
-; SI-MOVREL-NEXT: s_cbranch_execnz .LBB26_4
-; SI-MOVREL-NEXT: ; %bb.5: ; in Loop: Header=BB26_2 Depth=1
+; SI-MOVREL-NEXT: s_cbranch_execnz .LBB26_3
+; SI-MOVREL-NEXT: ; %bb.4: ; in Loop: Header=BB26_1 Depth=1
; SI-MOVREL-NEXT: s_mov_b64 exec, s[4:5]
-; SI-MOVREL-NEXT: s_cbranch_execnz .LBB26_2
-; SI-MOVREL-NEXT: .LBB26_6: ; %bb8
+; SI-MOVREL-NEXT: s_cbranch_execnz .LBB26_1
+; SI-MOVREL-NEXT: .LBB26_5: ; %bb8
; SI-MOVREL-NEXT: s_endpgm
;
; VI-MOVREL-LABEL: broken_phi_bb:
; VI-MOVREL: ; %bb.0: ; %bb
; VI-MOVREL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-MOVREL-NEXT: v_mov_b32_e32 v0, 8
-; VI-MOVREL-NEXT: s_branch .LBB26_2
-; VI-MOVREL-NEXT: .LBB26_1:
-; VI-MOVREL-NEXT: ; implicit-def: $vgpr0
-; VI-MOVREL-NEXT: s_branch .LBB26_6
-; VI-MOVREL-NEXT: .LBB26_2: ; %bb2
+; VI-MOVREL-NEXT: s_branch .LBB26_1
+; VI-MOVREL-NEXT: .LBB26_1: ; %bb2
; VI-MOVREL-NEXT: ; =>This Loop Header: Depth=1
-; VI-MOVREL-NEXT: ; Child Loop BB26_4 Depth 2
+; VI-MOVREL-NEXT: ; Child Loop BB26_3 Depth 2
; VI-MOVREL-NEXT: s_waitcnt lgkmcnt(0)
; VI-MOVREL-NEXT: v_cmp_le_i32_e32 vcc, s0, v0
-; VI-MOVREL-NEXT: s_cbranch_vccnz .LBB26_1
-; VI-MOVREL-NEXT: ; %bb.3: ; %bb4
-; VI-MOVREL-NEXT: ; in Loop: Header=BB26_2 Depth=1
+; VI-MOVREL-NEXT: s_cbranch_vccnz .LBB26_5
+; VI-MOVREL-NEXT: ; %bb.2: ; %bb4
+; VI-MOVREL-NEXT: ; in Loop: Header=BB26_1 Depth=1
; VI-MOVREL-NEXT: flat_load_dword v16, v[0:1] glc
; VI-MOVREL-NEXT: s_waitcnt vmcnt(0)
; VI-MOVREL-NEXT: v_mov_b32_e32 v17, s1
; VI-MOVREL-NEXT: s_mov_b64 s[2:3], exec
; VI-MOVREL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-MOVREL-NEXT: .LBB26_4: ; Parent Loop BB26_2 Depth=1
+; VI-MOVREL-NEXT: .LBB26_3: ; Parent Loop BB26_1 Depth=1
; VI-MOVREL-NEXT: ; => This Inner Loop Header: Depth=2
; VI-MOVREL-NEXT: v_readfirstlane_b32 s4, v16
; VI-MOVREL-NEXT: v_cmp_eq_u32_e32 vcc, s4, v16
@@ -9287,35 +9269,32 @@ define amdgpu_kernel void @broken_phi_bb(i32 %arg, i32 %arg1) {
; VI-MOVREL-NEXT: s_mov_b32 m0, s4
; VI-MOVREL-NEXT: v_movreld_b32_e32 v0, v17
; VI-MOVREL-NEXT: s_xor_b64 exec, exec, vcc
-; VI-MOVREL-NEXT: s_cbranch_execnz .LBB26_4
-; VI-MOVREL-NEXT: ; %bb.5: ; in Loop: Header=BB26_2 Depth=1
+; VI-MOVREL-NEXT: s_cbranch_execnz .LBB26_3
+; VI-MOVREL-NEXT: ; %bb.4: ; in Loop: Header=BB26_1 Depth=1
; VI-MOVREL-NEXT: s_mov_b64 exec, s[2:3]
-; VI-MOVREL-NEXT: s_cbranch_execnz .LBB26_2
-; VI-MOVREL-NEXT: .LBB26_6: ; %bb8
+; VI-MOVREL-NEXT: s_cbranch_execnz .LBB26_1
+; VI-MOVREL-NEXT: .LBB26_5: ; %bb8
; VI-MOVREL-NEXT: s_endpgm
;
; VI-IDXMODE-LABEL: broken_phi_bb:
; VI-IDXMODE: ; %bb.0: ; %bb
; VI-IDXMODE-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-IDXMODE-NEXT: v_mov_b32_e32 v0, 8
-; VI-IDXMODE-NEXT: s_branch .LBB26_2
-; VI-IDXMODE-NEXT: .LBB26_1:
-; VI-IDXMODE-NEXT: ; implicit-def: $vgpr0
-; VI-IDXMODE-NEXT: s_branch .LBB26_6
-; VI-IDXMODE-NEXT: .LBB26_2: ; %bb2
+; VI-IDXMODE-NEXT: s_branch .LBB26_1
+; VI-IDXMODE-NEXT: .LBB26_1: ; %bb2
; VI-IDXMODE-NEXT: ; =>This Loop Header: Depth=1
-; VI-IDXMODE-NEXT: ; Child Loop BB26_4 Depth 2
+; VI-IDXMODE-NEXT: ; Child Loop BB26_3 Depth 2
; VI-IDXMODE-NEXT: s_waitcnt lgkmcnt(0)
; VI-IDXMODE-NEXT: v_cmp_le_i32_e32 vcc, s0, v0
-; VI-IDXMODE-NEXT: s_cbranch_vccnz .LBB26_1
-; VI-IDXMODE-NEXT: ; %bb.3: ; %bb4
-; VI-IDXMODE-NEXT: ; in Loop: Header=BB26_2 Depth=1
+; VI-IDXMODE-NEXT: s_cbranch_vccnz .LBB26_5
+; VI-IDXMODE-NEXT: ; %bb.2: ; %bb4
+; VI-IDXMODE-NEXT: ; in Loop: Header=BB26_1 Depth=1
; VI-IDXMODE-NEXT: flat_load_dword v16, v[0:1] glc
; VI-IDXMODE-NEXT: s_waitcnt vmcnt(0)
; VI-IDXMODE-NEXT: v_mov_b32_e32 v17, s1
; VI-IDXMODE-NEXT: s_mov_b64 s[2:3], exec
; VI-IDXMODE-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-IDXMODE-NEXT: .LBB26_4: ; Parent Loop BB26_2 Depth=1
+; VI-IDXMODE-NEXT: .LBB26_3: ; Parent Loop BB26_1 Depth=1
; VI-IDXMODE-NEXT: ; => This Inner Loop Header: Depth=2
; VI-IDXMODE-NEXT: v_readfirstlane_b32 s4, v16
; VI-IDXMODE-NEXT: v_cmp_eq_u32_e32 vcc, s4, v16
@@ -9324,35 +9303,32 @@ define amdgpu_kernel void @broken_phi_bb(i32 %arg, i32 %arg1) {
; VI-IDXMODE-NEXT: v_mov_b32_e32 v0, v17
; VI-IDXMODE-NEXT: s_set_gpr_idx_off
; VI-IDXMODE-NEXT: s_xor_b64 exec, exec, vcc
-; VI-IDXMODE-NEXT: s_cbranch_execnz .LBB26_4
-; VI-IDXMODE-NEXT: ; %bb.5: ; in Loop: Header=BB26_2 Depth=1
+; VI-IDXMODE-NEXT: s_cbranch_execnz .LBB26_3
+; VI-IDXMODE-NEXT: ; %bb.4: ; in Loop: Header=BB26_1 Depth=1
; VI-IDXMODE-NEXT: s_mov_b64 exec, s[2:3]
-; VI-IDXMODE-NEXT: s_cbranch_execnz .LBB26_2
-; VI-IDXMODE-NEXT: .LBB26_6: ; %bb8
+; VI-IDXMODE-NEXT: s_cbranch_execnz .LBB26_1
+; VI-IDXMODE-NEXT: .LBB26_5: ; %bb8
; VI-IDXMODE-NEXT: s_endpgm
;
; GFX9-IDXMODE-LABEL: broken_phi_bb:
; GFX9-IDXMODE: ; %bb.0: ; %bb
; GFX9-IDXMODE-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX9-IDXMODE-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-IDXMODE-NEXT: s_branch .LBB26_2
-; GFX9-IDXMODE-NEXT: .LBB26_1:
-; GFX9-IDXMODE-NEXT: ; implicit-def: $vgpr0
-; GFX9-IDXMODE-NEXT: s_branch .LBB26_6
-; GFX9-IDXMODE-NEXT: .LBB26_2: ; %bb2
+; GFX9-IDXMODE-NEXT: s_branch .LBB26_1
+; GFX9-IDXMODE-NEXT: .LBB26_1: ; %bb2
; GFX9-IDXMODE-NEXT: ; =>This Loop Header: Depth=1
-; GFX9-IDXMODE-NEXT: ; Child Loop BB26_4 Depth 2
+; GFX9-IDXMODE-NEXT: ; Child Loop BB26_3 Depth 2
; GFX9-IDXMODE-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-IDXMODE-NEXT: v_cmp_le_i32_e32 vcc, s0, v0
-; GFX9-IDXMODE-NEXT: s_cbranch_vccnz .LBB26_1
-; GFX9-IDXMODE-NEXT: ; %bb.3: ; %bb4
-; GFX9-IDXMODE-NEXT: ; in Loop: Header=BB26_2 Depth=1
+; GFX9-IDXMODE-NEXT: s_cbranch_vccnz .LBB26_5
+; GFX9-IDXMODE-NEXT: ; %bb.2: ; %bb4
+; GFX9-IDXMODE-NEXT: ; in Loop: Header=BB26_1 Depth=1
; GFX9-IDXMODE-NEXT: global_load_dword v16, v[0:1], off glc
; GFX9-IDXMODE-NEXT: s_waitcnt vmcnt(0)
; GFX9-IDXMODE-NEXT: v_mov_b32_e32 v17, s1
; GFX9-IDXMODE-NEXT: s_mov_b64 s[2:3], exec
; GFX9-IDXMODE-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-IDXMODE-NEXT: .LBB26_4: ; Parent Loop BB26_2 Depth=1
+; GFX9-IDXMODE-NEXT: .LBB26_3: ; Parent Loop BB26_1 Depth=1
; GFX9-IDXMODE-NEXT: ; => This Inner Loop Header: Depth=2
; GFX9-IDXMODE-NEXT: v_readfirstlane_b32 s4, v16
; GFX9-IDXMODE-NEXT: v_cmp_eq_u32_e32 vcc, s4, v16
@@ -9361,11 +9337,11 @@ define amdgpu_kernel void @broken_phi_bb(i32 %arg, i32 %arg1) {
; GFX9-IDXMODE-NEXT: v_mov_b32_e32 v0, v17
; GFX9-IDXMODE-NEXT: s_set_gpr_idx_off
; GFX9-IDXMODE-NEXT: s_xor_b64 exec, exec, vcc
-; GFX9-IDXMODE-NEXT: s_cbranch_execnz .LBB26_4
-; GFX9-IDXMODE-NEXT: ; %bb.5: ; in Loop: Header=BB26_2 Depth=1
+; GFX9-IDXMODE-NEXT: s_cbranch_execnz .LBB26_3
+; GFX9-IDXMODE-NEXT: ; %bb.4: ; in Loop: Header=BB26_1 Depth=1
; GFX9-IDXMODE-NEXT: s_mov_b64 exec, s[2:3]
-; GFX9-IDXMODE-NEXT: s_cbranch_execnz .LBB26_2
-; GFX9-IDXMODE-NEXT: .LBB26_6: ; %bb8
+; GFX9-IDXMODE-NEXT: s_cbranch_execnz .LBB26_1
+; GFX9-IDXMODE-NEXT: .LBB26_5: ; %bb8
; GFX9-IDXMODE-NEXT: s_endpgm
bb:
br label %bb2
diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
index 7cbf9aeacfe48..63fb092847c29 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
@@ -1968,7 +1968,7 @@ define amdgpu_kernel void @insert_split_bb(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB42_4
+; SI-NEXT: s_cbranch_scc0 .LBB42_2
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: s_load_dword s5, s[2:3], 0x1
; SI-NEXT: s_mov_b64 s[6:7], 0
@@ -1986,8 +1986,6 @@ define amdgpu_kernel void @insert_split_bb(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
-; SI-NEXT: .LBB42_4:
-; SI-NEXT: s_branch .LBB42_2
;
; VI-LABEL: insert_split_bb:
; VI: ; %bb.0: ; %entry
@@ -1995,7 +1993,7 @@ define amdgpu_kernel void @insert_split_bb(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB42_4
+; VI-NEXT: s_cbranch_scc0 .LBB42_2
; VI-NEXT: ; %bb.1: ; %else
; VI-NEXT: s_load_dword s5, s[2:3], 0x4
; VI-NEXT: s_cbranch_execnz .LBB42_3
@@ -2010,8 +2008,6 @@ define amdgpu_kernel void @insert_split_bb(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
-; VI-NEXT: .LBB42_4:
-; VI-NEXT: s_branch .LBB42_2
entry:
%0 = insertelement <2 x i32> poison, i32 %a, i32 0
%1 = icmp eq i32 %a, 0
diff --git a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
index faf70f55876f7..83d5495eac416 100644
--- a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
+++ b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
@@ -26,22 +26,18 @@ define void @issue63986(i64 %0, i64 %idxprom, ptr inreg %ptr) {
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_store_dwordx4 v[6:7], v[10:13]
; CHECK-NEXT: s_cbranch_vccnz .LBB0_1
-; CHECK-NEXT: ; %bb.2: ; %dynamic-memcpy-expansion-residual-cond
-; CHECK-NEXT: s_branch .LBB0_4
-; CHECK-NEXT: ; %bb.3:
-; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
-; CHECK-NEXT: s_branch .LBB0_5
-; CHECK-NEXT: .LBB0_4: ; %dynamic-memcpy-expansion-residual-cond.dynamic-memcpy-post-expansion_crit_edge
+; CHECK-NEXT: s_branch .LBB0_2
+; CHECK-NEXT: .LBB0_2: ; %dynamic-memcpy-expansion-residual-cond.dynamic-memcpy-post-expansion_crit_edge
; CHECK-NEXT: v_lshlrev_b64 v[6:7], 6, v[2:3]
-; CHECK-NEXT: s_cbranch_execnz .LBB0_8
-; CHECK-NEXT: .LBB0_5: ; %dynamic-memcpy-expansion-residual-body.preheader
+; CHECK-NEXT: s_cbranch_execnz .LBB0_6
+; CHECK-NEXT: ; %bb.3: ; %dynamic-memcpy-expansion-residual-body.preheader
; CHECK-NEXT: s_add_u32 s4, s16, 32
; CHECK-NEXT: s_addc_u32 s5, s17, 0
; CHECK-NEXT: v_mov_b32_e32 v3, s5
; CHECK-NEXT: v_add_co_u32_e32 v2, vcc, s4, v4
; CHECK-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
; CHECK-NEXT: s_mov_b64 s[4:5], 0
-; CHECK-NEXT: ; %bb.6: ; %dynamic-memcpy-expansion-residual-body
+; CHECK-NEXT: ; %bb.4: ; %dynamic-memcpy-expansion-residual-body
; CHECK-NEXT: s_add_u32 s6, 32, s4
; CHECK-NEXT: s_addc_u32 s7, 0, s5
; CHECK-NEXT: v_mov_b32_e32 v6, s6
@@ -54,10 +50,10 @@ define void @issue63986(i64 %0, i64 %idxprom, ptr inreg %ptr) {
; CHECK-NEXT: s_addc_u32 s5, 0, s5
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_store_byte v[6:7], v10
-; CHECK-NEXT: ; %bb.7:
+; CHECK-NEXT: ; %bb.5:
; CHECK-NEXT: v_mov_b32_e32 v7, v5
; CHECK-NEXT: v_mov_b32_e32 v6, v4
-; CHECK-NEXT: .LBB0_8: ; %dynamic-memcpy-post-expansion
+; CHECK-NEXT: .LBB0_6: ; %dynamic-memcpy-post-expansion
; CHECK-NEXT: v_and_b32_e32 v2, 15, v0
; CHECK-NEXT: v_and_b32_e32 v0, -16, v0
; CHECK-NEXT: v_add_co_u32_e32 v4, vcc, v6, v0
@@ -68,27 +64,27 @@ define void @issue63986(i64 %0, i64 %idxprom, ptr inreg %ptr) {
; CHECK-NEXT: v_mov_b32_e32 v6, s17
; CHECK-NEXT: v_add_co_u32_e32 v4, vcc, s16, v4
; CHECK-NEXT: v_addc_co_u32_e32 v5, vcc, v6, v5, vcc
-; CHECK-NEXT: s_branch .LBB0_11
-; CHECK-NEXT: .LBB0_9: ; %Flow14
-; CHECK-NEXT: ; in Loop: Header=BB0_11 Depth=1
+; CHECK-NEXT: s_branch .LBB0_9
+; CHECK-NEXT: .LBB0_7: ; %Flow14
+; CHECK-NEXT: ; in Loop: Header=BB0_9 Depth=1
; CHECK-NEXT: s_or_b64 exec, exec, s[10:11]
; CHECK-NEXT: s_mov_b64 s[8:9], 0
-; CHECK-NEXT: .LBB0_10: ; %Flow16
-; CHECK-NEXT: ; in Loop: Header=BB0_11 Depth=1
+; CHECK-NEXT: .LBB0_8: ; %Flow16
+; CHECK-NEXT: ; in Loop: Header=BB0_9 Depth=1
; CHECK-NEXT: s_andn2_b64 vcc, exec, s[8:9]
-; CHECK-NEXT: s_cbranch_vccz .LBB0_18
-; CHECK-NEXT: .LBB0_11: ; %while.cond
+; CHECK-NEXT: s_cbranch_vccz .LBB0_16
+; CHECK-NEXT: .LBB0_9: ; %while.cond
; CHECK-NEXT: ; =>This Loop Header: Depth=1
-; CHECK-NEXT: ; Child Loop BB0_13 Depth 2
-; CHECK-NEXT: ; Child Loop BB0_17 Depth 2
+; CHECK-NEXT: ; Child Loop BB0_11 Depth 2
+; CHECK-NEXT: ; Child Loop BB0_15 Depth 2
; CHECK-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
-; CHECK-NEXT: s_cbranch_execz .LBB0_14
-; CHECK-NEXT: ; %bb.12: ; %dynamic-memcpy-expansion-main-body2.preheader
-; CHECK-NEXT: ; in Loop: Header=BB0_11 Depth=1
+; CHECK-NEXT: s_cbranch_execz .LBB0_12
+; CHECK-NEXT: ; %bb.10: ; %dynamic-memcpy-expansion-main-body2.preheader
+; CHECK-NEXT: ; in Loop: Header=BB0_9 Depth=1
; CHECK-NEXT: s_mov_b64 s[10:11], 0
; CHECK-NEXT: s_mov_b64 s[12:13], 0
-; CHECK-NEXT: .LBB0_13: ; %dynamic-memcpy-expansion-main-body2
-; CHECK-NEXT: ; Parent Loop BB0_11 Depth=1
+; CHECK-NEXT: .LBB0_11: ; %dynamic-memcpy-expansion-main-body2
+; CHECK-NEXT: ; Parent Loop BB0_9 Depth=1
; CHECK-NEXT: ; => This Inner Loop Header: Depth=2
; CHECK-NEXT: v_mov_b32_e32 v6, s10
; CHECK-NEXT: v_mov_b32_e32 v7, s11
@@ -102,22 +98,22 @@ define void @issue63986(i64 %0, i64 %idxprom, ptr inreg %ptr) {
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_store_dwordx4 v[6:7], v[10:13]
; CHECK-NEXT: s_andn2_b64 exec, exec, s[12:13]
-; CHECK-NEXT: s_cbranch_execnz .LBB0_13
-; CHECK-NEXT: .LBB0_14: ; %Flow15
-; CHECK-NEXT: ; in Loop: Header=BB0_11 Depth=1
+; CHECK-NEXT: s_cbranch_execnz .LBB0_11
+; CHECK-NEXT: .LBB0_12: ; %Flow15
+; CHECK-NEXT: ; in Loop: Header=BB0_9 Depth=1
; CHECK-NEXT: s_or_b64 exec, exec, s[8:9]
; CHECK-NEXT: s_mov_b64 s[8:9], -1
-; CHECK-NEXT: s_cbranch_execz .LBB0_10
-; CHECK-NEXT: ; %bb.15: ; %dynamic-memcpy-expansion-residual-cond5
-; CHECK-NEXT: ; in Loop: Header=BB0_11 Depth=1
+; CHECK-NEXT: s_cbranch_execz .LBB0_8
+; CHECK-NEXT: ; %bb.13: ; %dynamic-memcpy-expansion-residual-cond5
+; CHECK-NEXT: ; in Loop: Header=BB0_9 Depth=1
; CHECK-NEXT: s_and_saveexec_b64 s[10:11], s[6:7]
-; CHECK-NEXT: s_cbranch_execz .LBB0_9
-; CHECK-NEXT: ; %bb.16: ; %dynamic-memcpy-expansion-residual-body4.preheader
-; CHECK-NEXT: ; in Loop: Header=BB0_11 Depth=1
+; CHECK-NEXT: s_cbranch_execz .LBB0_7
+; CHECK-NEXT: ; %bb.14: ; %dynamic-memcpy-expansion-residual-body4.preheader
+; CHECK-NEXT: ; in Loop: Header=BB0_9 Depth=1
; CHECK-NEXT: s_mov_b64 s[12:13], 0
; CHECK-NEXT: s_mov_b64 s[14:15], 0
-; CHECK-NEXT: .LBB0_17: ; %dynamic-memcpy-expansion-residual-body4
-; CHECK-NEXT: ; Parent Loop BB0_11 Depth=1
+; CHECK-NEXT: .LBB0_15: ; %dynamic-memcpy-expansion-residual-body4
+; CHECK-NEXT: ; Parent Loop BB0_9 Depth=1
; CHECK-NEXT: ; => This Inner Loop Header: Depth=2
; CHECK-NEXT: v_mov_b32_e32 v10, s13
; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, s12, v0
@@ -132,9 +128,9 @@ define void @issue63986(i64 %0, i64 %idxprom, ptr inreg %ptr) {
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_store_byte v[6:7], v11
; CHECK-NEXT: s_andn2_b64 exec, exec, s[14:15]
-; CHECK-NEXT: s_cbranch_execnz .LBB0_17
-; CHECK-NEXT: s_branch .LBB0_9
-; CHECK-NEXT: .LBB0_18: ; %DummyReturnBlock
+; CHECK-NEXT: s_cbranch_execnz .LBB0_15
+; CHECK-NEXT: s_branch .LBB0_7
+; CHECK-NEXT: .LBB0_16: ; %DummyReturnBlock
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
entry:
@@ -157,37 +153,35 @@ define void @issue63986_reduced_expanded(i64 %idxprom) {
; CHECK-NEXT: ; %bb.2: ; %loop-memcpy-residual-header
; CHECK-NEXT: s_and_b32 s4, 32, 15
; CHECK-NEXT: s_mov_b32 s5, 0
-; CHECK-NEXT: s_cbranch_scc0 .LBB1_4
-; CHECK-NEXT: ; %bb.3:
-; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
-; CHECK-NEXT: s_branch .LBB1_5
-; CHECK-NEXT: .LBB1_4: ; %loop-memcpy-residual-header.post-loop-memcpy-expansion_crit_edge
+; CHECK-NEXT: s_cbranch_scc0 .LBB1_3
+; CHECK-NEXT: s_branch .LBB1_4
+; CHECK-NEXT: .LBB1_3: ; %loop-memcpy-residual-header.post-loop-memcpy-expansion_crit_edge
; CHECK-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
-; CHECK-NEXT: s_cbranch_execnz .LBB1_8
-; CHECK-NEXT: .LBB1_5: ; %loop-memcpy-residual.preheader
+; CHECK-NEXT: s_cbranch_execnz .LBB1_7
+; CHECK-NEXT: .LBB1_4: ; %loop-memcpy-residual.preheader
; CHECK-NEXT: v_mov_b32_e32 v0, s4
; CHECK-NEXT: s_mov_b64 s[8:9], 0
; CHECK-NEXT: s_mov_b32 s7, 0
; CHECK-NEXT: v_mov_b32_e32 v1, s5
-; CHECK-NEXT: .LBB1_6: ; %loop-memcpy-residual
+; CHECK-NEXT: .LBB1_5: ; %loop-memcpy-residual
; CHECK-NEXT: s_add_i32 s6, s8, 1
; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
; CHECK-NEXT: s_mov_b64 s[8:9], 1
-; CHECK-NEXT: s_cbranch_vccnz .LBB1_6
-; CHECK-NEXT: ; %bb.7: ; %Flow
+; CHECK-NEXT: s_cbranch_vccnz .LBB1_5
+; CHECK-NEXT: ; %bb.6: ; %Flow
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v1, 0
-; CHECK-NEXT: .LBB1_8: ; %post-loop-memcpy-expansion
+; CHECK-NEXT: .LBB1_7: ; %post-loop-memcpy-expansion
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: v_mov_b32_e32 v3, v2
; CHECK-NEXT: v_mov_b32_e32 v4, v2
; CHECK-NEXT: v_mov_b32_e32 v5, v2
; CHECK-NEXT: s_and_b64 vcc, exec, 0
; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
-; CHECK-NEXT: .LBB1_9: ; %loop-memcpy-expansion2
+; CHECK-NEXT: .LBB1_8: ; %loop-memcpy-expansion2
; CHECK-NEXT: s_mov_b64 vcc, vcc
-; CHECK-NEXT: s_cbranch_vccz .LBB1_9
-; CHECK-NEXT: ; %bb.10: ; %DummyReturnBlock
+; CHECK-NEXT: s_cbranch_vccz .LBB1_8
+; CHECK-NEXT: ; %bb.9: ; %DummyReturnBlock
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/mul.ll b/llvm/test/CodeGen/AMDGPU/mul.ll
index f1130100725c8..155fa49b87af3 100644
--- a/llvm/test/CodeGen/AMDGPU/mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul.ll
@@ -128,7 +128,7 @@ define amdgpu_kernel void @test_mul_v2i32(ptr addrspace(1) %out, ptr addrspace(1
; GFX1250-LABEL: test_mul_v2i32:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 nv
; GFX1250-NEXT: s_mov_b32 s6, -1
; GFX1250-NEXT: s_mov_b32 s7, 0x31016000
; GFX1250-NEXT: s_mov_b32 s10, s6
@@ -310,7 +310,7 @@ define amdgpu_kernel void @v_mul_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX1250-LABEL: v_mul_v4i32:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 nv
; GFX1250-NEXT: s_mov_b32 s6, -1
; GFX1250-NEXT: s_mov_b32 s7, 0x31016000
; GFX1250-NEXT: s_mov_b32 s10, s6
@@ -450,9 +450,9 @@ define amdgpu_kernel void @s_trunc_i64_mul_to_i32(ptr addrspace(1) %out, i64 %a,
; GFX1250-LABEL: s_trunc_i64_mul_to_i32:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 nv
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_load_b32 s3, s[4:5], 0x34
+; GFX1250-NEXT: s_load_b32 s3, s[4:5], 0x34 nv
; GFX1250-NEXT: ; kill: killed $sgpr4_sgpr5
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_mul_i32 s2, s3, s2
@@ -619,8 +619,8 @@ define amdgpu_kernel void @v_trunc_i64_mul_to_i32(ptr addrspace(1) %out, ptr add
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
; GFX1250-NEXT: s_clause 0x1
-; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_load_b64 s[8:9], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 nv
+; GFX1250-NEXT: s_load_b64 s[8:9], s[4:5], 0x34 nv
; GFX1250-NEXT: s_mov_b32 s6, -1
; GFX1250-NEXT: s_mov_b32 s7, 0x31016000
; GFX1250-NEXT: s_mov_b32 s14, s6
@@ -757,7 +757,7 @@ define amdgpu_kernel void @mul64_sext_c(ptr addrspace(1) %out, i32 %in) {
; GFX1250-LABEL: mul64_sext_c:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 nv
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_ashr_i32 s3, s2, 31
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -874,7 +874,7 @@ define amdgpu_kernel void @mul64_zext_c(ptr addrspace(1) %out, i32 %in) {
; GFX1250-LABEL: mul64_zext_c:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 nv
; GFX1250-NEXT: s_mov_b32 s3, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_mul_u64 s[4:5], s[2:3], 0x50
@@ -1023,7 +1023,7 @@ define amdgpu_kernel void @v_mul64_sext_c(ptr addrspace(1) %out, ptr addrspace(1
; GFX1250-LABEL: v_mul64_sext_c:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 nv
; GFX1250-NEXT: s_mov_b32 s6, -1
; GFX1250-NEXT: s_mov_b32 s7, 0x31016000
; GFX1250-NEXT: s_mov_b32 s10, s6
@@ -1187,7 +1187,7 @@ define amdgpu_kernel void @v_mul64_zext_c(ptr addrspace(1) %out, ptr addrspace(1
; GFX1250-LABEL: v_mul64_zext_c:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 nv
; GFX1250-NEXT: s_mov_b32 s6, -1
; GFX1250-NEXT: s_mov_b32 s7, 0x31016000
; GFX1250-NEXT: s_mov_b32 s10, s6
@@ -1347,7 +1347,7 @@ define amdgpu_kernel void @v_mul64_sext_inline_imm(ptr addrspace(1) %out, ptr ad
; GFX1250-LABEL: v_mul64_sext_inline_imm:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 nv
; GFX1250-NEXT: s_mov_b32 s6, -1
; GFX1250-NEXT: s_mov_b32 s7, 0x31016000
; GFX1250-NEXT: s_mov_b32 s10, s6
@@ -1477,9 +1477,9 @@ define amdgpu_kernel void @s_mul_i32(ptr addrspace(1) %out, [8 x i32], i32 %a, [
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
; GFX1250-NEXT: s_clause 0x2
-; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x4c
-; GFX1250-NEXT: s_load_b32 s3, s[4:5], 0x70
-; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x4c nv
+; GFX1250-NEXT: s_load_b32 s3, s[4:5], 0x70 nv
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 nv
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_mul_i32 s2, s2, s3
; GFX1250-NEXT: s_mov_b32 s3, 0x31016000
@@ -1616,7 +1616,7 @@ define amdgpu_kernel void @v_mul_i32(ptr addrspace(1) %out, ptr addrspace(1) %in
; GFX1250-LABEL: v_mul_i32:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 nv
; GFX1250-NEXT: s_mov_b32 s6, -1
; GFX1250-NEXT: s_mov_b32 s7, 0x31016000
; GFX1250-NEXT: s_mov_b32 s10, s6
@@ -1751,9 +1751,9 @@ define amdgpu_kernel void @s_mul_i1(ptr addrspace(1) %out, [8 x i32], i1 %a, [8
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
; GFX1250-NEXT: s_clause 0x2
-; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x4c
-; GFX1250-NEXT: s_load_b32 s3, s[4:5], 0x70
-; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x4c nv
+; GFX1250-NEXT: s_load_b32 s3, s[4:5], 0x70 nv
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 nv
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_and_b32 s2, s2, s3
; GFX1250-NEXT: s_mov_b32 s3, 0x31016000
@@ -1926,7 +1926,7 @@ define amdgpu_kernel void @v_mul_i1(ptr addrspace(1) %out, ptr addrspace(1) %in)
; GFX1250-LABEL: v_mul_i1:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 nv
; GFX1250-NEXT: s_mov_b32 s6, -1
; GFX1250-NEXT: s_mov_b32 s7, 0x31016000
; GFX1250-NEXT: s_mov_b32 s10, s6
@@ -2107,8 +2107,8 @@ define amdgpu_kernel void @s_mul_i64(ptr addrspace(1) %out, i64 %a, i64 %b) noun
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
; GFX1250-NEXT: s_clause 0x1
-; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 nv
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 nv
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_mul_u64 s[4:5], s[2:3], s[6:7]
; GFX1250-NEXT: s_mov_b32 s3, 0x31016000
@@ -2309,8 +2309,8 @@ define amdgpu_kernel void @v_mul_i64(ptr addrspace(1) %out, ptr addrspace(1) %ap
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
; GFX1250-NEXT: s_clause 0x1
-; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_load_b64 s[8:9], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 nv
+; GFX1250-NEXT: s_load_b64 s[8:9], s[4:5], 0x34 nv
; GFX1250-NEXT: s_mov_b32 s6, -1
; GFX1250-NEXT: s_mov_b32 s7, 0x31016000
; GFX1250-NEXT: s_mov_b32 s14, s6
@@ -2574,7 +2574,7 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX1250-LABEL: mul32_in_branch:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x34 nv
; GFX1250-NEXT: s_mov_b32 s6, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_cmp_lg_u32 s0, 0
@@ -2586,7 +2586,7 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX1250-NEXT: s_mov_b32 s6, -1
; GFX1250-NEXT: ; implicit-def: $sgpr7
; GFX1250-NEXT: .LBB15_3: ; %Flow
-; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 nv
; GFX1250-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX1250-NEXT: s_cbranch_vccnz .LBB15_5
; GFX1250-NEXT: ; %bb.4: ; %if
@@ -2669,7 +2669,7 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u64_e64 s[10:11], s[4:5], 0
; SI-NEXT: s_and_b64 vcc, exec, s[10:11]
-; SI-NEXT: s_cbranch_vccz .LBB16_4
+; SI-NEXT: s_cbranch_vccz .LBB16_2
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_mul_hi_u32 v0, s4, v0
@@ -2693,9 +2693,6 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
-; SI-NEXT: .LBB16_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SI-NEXT: s_branch .LBB16_2
;
; VI-LABEL: mul64_in_branch:
; VI: ; %bb.0: ; %entry
@@ -2703,7 +2700,7 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: s_mov_b64 s[8:9], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u64 s[4:5], 0
-; VI-NEXT: s_cbranch_scc0 .LBB16_4
+; VI-NEXT: s_cbranch_scc0 .LBB16_2
; VI-NEXT: ; %bb.1: ; %else
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: v_mad_u64_u32 v[0:1], s[10:11], s4, v0, 0
@@ -2725,9 +2722,6 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
-; VI-NEXT: .LBB16_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; VI-NEXT: s_branch .LBB16_2
;
; GFX9-LABEL: mul64_in_branch:
; GFX9: ; %bb.0: ; %entry
@@ -2735,7 +2729,7 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX9-NEXT: s_mov_b64 s[0:1], 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u64 s[12:13], 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB16_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB16_2
; GFX9-NEXT: ; %bb.1: ; %else
; GFX9-NEXT: s_mul_i32 s2, s12, s15
; GFX9-NEXT: s_mul_hi_u32 s3, s12, s14
@@ -2744,21 +2738,18 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX9-NEXT: s_add_i32 s3, s2, s3
; GFX9-NEXT: s_mul_i32 s2, s12, s14
; GFX9-NEXT: s_andn2_b64 vcc, exec, s[0:1]
-; GFX9-NEXT: s_cbranch_vccnz .LBB16_4
+; GFX9-NEXT: s_cbranch_vccnz .LBB16_3
; GFX9-NEXT: .LBB16_2: ; %if
; GFX9-NEXT: s_mov_b32 s3, 0xf000
; GFX9-NEXT: s_mov_b32 s2, -1
; GFX9-NEXT: s_mov_b32 s0, s10
; GFX9-NEXT: s_mov_b32 s1, s11
; GFX9-NEXT: buffer_load_dwordx2 v[0:1], off, s[0:3], 0
-; GFX9-NEXT: s_branch .LBB16_5
+; GFX9-NEXT: s_branch .LBB16_4
; GFX9-NEXT: .LBB16_3:
-; GFX9-NEXT: ; implicit-def: $sgpr2_sgpr3
-; GFX9-NEXT: s_branch .LBB16_2
-; GFX9-NEXT: .LBB16_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: .LBB16_5: ; %endif
+; GFX9-NEXT: .LBB16_4: ; %endif
; GFX9-NEXT: s_mov_b32 s11, 0xf000
; GFX9-NEXT: s_mov_b32 s10, -1
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -2770,7 +2761,7 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX10-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_cmp_lg_u64 s[12:13], 0
-; GFX10-NEXT: s_cbranch_scc0 .LBB16_3
+; GFX10-NEXT: s_cbranch_scc0 .LBB16_2
; GFX10-NEXT: ; %bb.1: ; %else
; GFX10-NEXT: s_mul_i32 s0, s12, s15
; GFX10-NEXT: s_mul_hi_u32 s1, s12, s14
@@ -2778,21 +2769,18 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX10-NEXT: s_add_i32 s0, s1, s0
; GFX10-NEXT: s_add_i32 s1, s0, s2
; GFX10-NEXT: s_mul_i32 s0, s12, s14
-; GFX10-NEXT: s_cbranch_execnz .LBB16_4
+; GFX10-NEXT: s_cbranch_execnz .LBB16_3
; GFX10-NEXT: .LBB16_2: ; %if
; GFX10-NEXT: s_mov_b32 s3, 0x31016000
; GFX10-NEXT: s_mov_b32 s2, -1
; GFX10-NEXT: s_mov_b32 s0, s10
; GFX10-NEXT: s_mov_b32 s1, s11
; GFX10-NEXT: buffer_load_dwordx2 v[0:1], off, s[0:3], 0
-; GFX10-NEXT: s_branch .LBB16_5
+; GFX10-NEXT: s_branch .LBB16_4
; GFX10-NEXT: .LBB16_3:
-; GFX10-NEXT: ; implicit-def: $sgpr0_sgpr1
-; GFX10-NEXT: s_branch .LBB16_2
-; GFX10-NEXT: .LBB16_4:
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
-; GFX10-NEXT: .LBB16_5: ; %endif
+; GFX10-NEXT: .LBB16_4: ; %endif
; GFX10-NEXT: s_mov_b32 s11, 0x31016000
; GFX10-NEXT: s_mov_b32 s10, -1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -2804,7 +2792,7 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX11-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB16_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB16_2
; GFX11-NEXT: ; %bb.1: ; %else
; GFX11-NEXT: s_mul_i32 s7, s4, s7
; GFX11-NEXT: s_mul_hi_u32 s8, s4, s6
@@ -2812,20 +2800,17 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX11-NEXT: s_add_i32 s7, s8, s7
; GFX11-NEXT: s_mul_i32 s4, s4, s6
; GFX11-NEXT: s_add_i32 s5, s7, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB16_4
+; GFX11-NEXT: s_cbranch_execnz .LBB16_3
; GFX11-NEXT: .LBB16_2: ; %if
; GFX11-NEXT: s_mov_b32 s7, 0x31016000
; GFX11-NEXT: s_mov_b32 s6, -1
; GFX11-NEXT: s_mov_b32 s4, s2
; GFX11-NEXT: s_mov_b32 s5, s3
; GFX11-NEXT: buffer_load_b64 v[0:1], off, s[4:7], 0
-; GFX11-NEXT: s_branch .LBB16_5
+; GFX11-NEXT: s_branch .LBB16_4
; GFX11-NEXT: .LBB16_3:
-; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX11-NEXT: s_branch .LBB16_2
-; GFX11-NEXT: .LBB16_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-NEXT: .LBB16_5: ; %endif
+; GFX11-NEXT: .LBB16_4: ; %endif
; GFX11-NEXT: s_mov_b32 s3, 0x31016000
; GFX11-NEXT: s_mov_b32 s2, -1
; GFX11-NEXT: s_waitcnt vmcnt(0)
@@ -2837,23 +2822,20 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX12-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX12-NEXT: s_cbranch_scc0 .LBB16_3
+; GFX12-NEXT: s_cbranch_scc0 .LBB16_2
; GFX12-NEXT: ; %bb.1: ; %else
; GFX12-NEXT: s_mul_u64 s[4:5], s[4:5], s[6:7]
-; GFX12-NEXT: s_cbranch_execnz .LBB16_4
+; GFX12-NEXT: s_cbranch_execnz .LBB16_3
; GFX12-NEXT: .LBB16_2: ; %if
; GFX12-NEXT: s_mov_b32 s7, 0x31016000
; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_mov_b32 s4, s2
; GFX12-NEXT: s_mov_b32 s5, s3
; GFX12-NEXT: buffer_load_b64 v[0:1], off, s[4:7], null
-; GFX12-NEXT: s_branch .LBB16_5
+; GFX12-NEXT: s_branch .LBB16_4
; GFX12-NEXT: .LBB16_3:
-; GFX12-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX12-NEXT: s_branch .LBB16_2
-; GFX12-NEXT: .LBB16_4:
; GFX12-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX12-NEXT: .LBB16_5: ; %endif
+; GFX12-NEXT: .LBB16_4: ; %endif
; GFX12-NEXT: s_mov_b32 s3, 0x31016000
; GFX12-NEXT: s_mov_b32 s2, -1
; GFX12-NEXT: s_wait_loadcnt 0x0
@@ -2863,26 +2845,23 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX1250-LABEL: mul64_in_branch:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 nv
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_cmp_lg_u64 s[12:13], 0
-; GFX1250-NEXT: s_cbranch_scc0 .LBB16_3
+; GFX1250-NEXT: s_cbranch_scc0 .LBB16_2
; GFX1250-NEXT: ; %bb.1: ; %else
; GFX1250-NEXT: s_mul_u64 s[0:1], s[12:13], s[14:15]
-; GFX1250-NEXT: s_cbranch_execnz .LBB16_4
+; GFX1250-NEXT: s_cbranch_execnz .LBB16_3
; GFX1250-NEXT: .LBB16_2: ; %if
; GFX1250-NEXT: s_mov_b32 s3, 0x31016000
; GFX1250-NEXT: s_mov_b32 s2, -1
; GFX1250-NEXT: s_mov_b32 s0, s10
; GFX1250-NEXT: s_mov_b32 s1, s11
; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[0:3], null
-; GFX1250-NEXT: s_branch .LBB16_5
+; GFX1250-NEXT: s_branch .LBB16_4
; GFX1250-NEXT: .LBB16_3:
-; GFX1250-NEXT: ; implicit-def: $sgpr0_sgpr1
-; GFX1250-NEXT: s_branch .LBB16_2
-; GFX1250-NEXT: .LBB16_4:
; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
-; GFX1250-NEXT: .LBB16_5: ; %endif
+; GFX1250-NEXT: .LBB16_4: ; %endif
; GFX1250-NEXT: s_mov_b32 s11, 0x31016000
; GFX1250-NEXT: s_mov_b32 s10, -1
; GFX1250-NEXT: s_wait_loadcnt 0x0
@@ -3235,9 +3214,9 @@ define amdgpu_kernel void @s_mul_i128(ptr addrspace(1) %out, [8 x i32], i128 %a,
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
; GFX1250-NEXT: s_clause 0x2
-; GFX1250-NEXT: s_load_b128 s[8:11], s[4:5], 0x7c
-; GFX1250-NEXT: s_load_b128 s[12:15], s[4:5], 0x4c
-; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b128 s[8:11], s[4:5], 0x7c nv
+; GFX1250-NEXT: s_load_b128 s[12:15], s[4:5], 0x4c nv
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 nv
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_mov_b64 s[4:5], 0xffffffff
; GFX1250-NEXT: s_mov_b32 s3, 0
@@ -3556,7 +3535,7 @@ define amdgpu_kernel void @v_mul_i128(ptr addrspace(1) %out, ptr addrspace(1) %a
; GFX1250-LABEL: v_mul_i128:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x2c
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x2c nv
; GFX1250-NEXT: v_and_b32_e32 v16, 0x3ff, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_clause 0x1
diff --git a/llvm/test/CodeGen/AMDGPU/redundant-block.mir b/llvm/test/CodeGen/AMDGPU/redundant-block.mir
new file mode 100644
index 0000000000000..5913b7060d8a1
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/redundant-block.mir
@@ -0,0 +1,51 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -run-pass=si-pre-emit-peephole %s -o - | FileCheck %s
+# Test removal of blocks that only contain an unconditional branch to another block.
+
+# bb.1 is redundant and will be eliminated.
+---
+name: test
+body: |
+ ; CHECK-LABEL: name: test
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: SI_RETURN
+ bb.0:
+ S_NOP 0
+ S_BRANCH %bb.1
+ bb.1:
+ S_BRANCH %bb.2
+ bb.2:
+ SI_RETURN
+...
+
+
+# The entry block and self-loop blocks cannot be eliminated.
+---
+name: negative_test
+body: |
+ ; CHECK-LABEL: name: negative_test
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_BRANCH %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: SI_RETURN
+ bb.0:
+ S_BRANCH %bb.2
+ bb.1:
+ S_BRANCH %bb.1
+ bb.2:
+ SI_RETURN
+...
diff --git a/llvm/test/CodeGen/AMDGPU/remove-not-short-exec-branch-on-unconditional-jump.mir b/llvm/test/CodeGen/AMDGPU/remove-not-short-exec-branch-on-unconditional-jump.mir
index f45f48434fa23..855c36c70b948 100644
--- a/llvm/test/CodeGen/AMDGPU/remove-not-short-exec-branch-on-unconditional-jump.mir
+++ b/llvm/test/CodeGen/AMDGPU/remove-not-short-exec-branch-on-unconditional-jump.mir
@@ -8,34 +8,21 @@ name: test
body: |
; CHECK-LABEL: name: test
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.5(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: successors: %bb.5(0x40000000), %bb.2(0x40000000)
; CHECK-NEXT: liveins: $vgpr0, $vgpr1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $sgpr0_sgpr1 = S_MOV_B64 $exec
; CHECK-NEXT: V_CMPX_EQ_U32_nosdst_e32 0, killed $vgpr0, implicit-def $exec, implicit $exec
; CHECK-NEXT: S_CBRANCH_EXECZ %bb.5, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: successors: %bb.2(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr1, $sgpr0_sgpr1
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $sgpr2_sgpr3 = IMPLICIT_DEF
- ; CHECK-NEXT: renamable $sgpr4_sgpr5 = IMPLICIT_DEF
; CHECK-NEXT: S_BRANCH %bb.2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: successors: %bb.4(0x40000000), %bb.3(0x40000000)
+ ; CHECK-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
; CHECK-NEXT: liveins: $vgpr1, $sgpr0_sgpr1, $sgpr2_sgpr3, $sgpr4_sgpr5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr4_sgpr5, implicit-def $scc
; CHECK-NEXT: S_CBRANCH_EXECZ %bb.4, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.3:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr1, $sgpr0_sgpr1, $sgpr2_sgpr3
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $sgpr4_sgpr5 = IMPLICIT_DEF
- ; CHECK-NEXT: S_BRANCH %bb.1
+ ; CHECK-NEXT: S_BRANCH %bb.2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.4:
; CHECK-NEXT: successors: %bb.5(0x80000000)
diff --git a/llvm/test/CodeGen/AMDGPU/set-wave-priority.ll b/llvm/test/CodeGen/AMDGPU/set-wave-priority.ll
index a27d1217031ca..78b00d417a738 100644
--- a/llvm/test/CodeGen/AMDGPU/set-wave-priority.ll
+++ b/llvm/test/CodeGen/AMDGPU/set-wave-priority.ll
@@ -57,12 +57,9 @@ b:
; CHECK: buffer_load_dwordx2
; CHECK-NEXT: s_setprio 0
; CHECK: s_cbranch_vccnz [[C]]
-; CHECK: {{.*}}: ; %b
-; CHECK-NOT: s_setprio
-; CHECK: s_branch [[EXIT:.*]]
; CHECK: [[C]]: ; %c
; CHECK-NEXT: s_setprio 0
-; CHECK: s_branch [[EXIT]]
+; CHECK: s_branch [[EXIT:.*]]
; CHECK: [[EXIT]]:
define amdgpu_ps <2 x float> @setprio_follows_setprio(ptr addrspace(8) inreg %p, i32 inreg %i) "amdgpu-wave-priority-threshold"="3" {
entry:
@@ -164,11 +161,9 @@ another_load:
; CHECK: buffer_load_dwordx2
; CHECK-NEXT: s_setprio 0
; CHECK-COUNT-4: v_add_f32_e32
-; CHECK: s_cbranch_scc0 [[A:.*]]
+; CHECK: s_cbranch_scc0 [[END:.*]]
; CHECK: {{.*}}: ; %b
; CHECK-NEXT: buffer_load_dwordx2
-; CHECK: s_branch [[END:.*]]
-; CHECK: [[A]]: ; %a
; CHECK: s_branch [[END]]
; CHECK: [[END]]:
define amdgpu_ps <2 x float> @valu_insts_threshold(ptr addrspace(8) inreg %p, i32 inreg %i) "amdgpu-wave-priority-threshold"="4" {
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll b/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
index 40b6f0262f979..0f7178efebe9f 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
@@ -15,7 +15,7 @@ define amdgpu_kernel void @sgpr_if_else_salu_br(ptr addrspace(1) %out, i32 %a, i
; SI-NEXT: s_load_dword s6, s[4:5], 0xf
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s0, 0
-; SI-NEXT: s_cbranch_scc0 .LBB0_4
+; SI-NEXT: s_cbranch_scc0 .LBB0_2
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: s_add_i32 s3, s3, s6
; SI-NEXT: s_cbranch_execnz .LBB0_3
@@ -30,9 +30,6 @@ define amdgpu_kernel void @sgpr_if_else_salu_br(ptr addrspace(1) %out, i32 %a, i
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
-; SI-NEXT: .LBB0_4:
-; SI-NEXT: ; implicit-def: $sgpr3
-; SI-NEXT: s_branch .LBB0_2
entry:
%0 = icmp eq i32 %a, 0
@@ -59,7 +56,7 @@ define amdgpu_kernel void @sgpr_if_else_salu_br_opt(ptr addrspace(1) %out, [8 x
; SI-NEXT: s_load_dword s2, s[4:5], 0x13
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s2, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: s_load_dword s0, s[4:5], 0x2e
; SI-NEXT: s_load_dword s1, s[4:5], 0x37
@@ -80,9 +77,6 @@ define amdgpu_kernel void @sgpr_if_else_salu_br_opt(ptr addrspace(1) %out, [8 x
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: ; implicit-def: $sgpr3
-; SI-NEXT: s_branch .LBB1_2
entry:
%cmp0 = icmp eq i32 %a, 0
diff --git a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
index 08cc6f9b6fff0..3fa3154272726 100644
--- a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
+++ b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
@@ -763,15 +763,14 @@ define amdgpu_ps float @test_kill_control_flow_return(i32 inreg %arg) #0 {
; SI-NEXT: s_mov_b64 s[2:3], exec
; SI-NEXT: s_andn2_b64 s[4:5], exec, s[4:5]
; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_3
; SI-NEXT: ; %bb.1: ; %entry
; SI-NEXT: s_and_b64 exec, exec, s[2:3]
; SI-NEXT: s_cmp_lg_u32 s0, 0
; SI-NEXT: v_mov_b32_e32 v0, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_3
-; SI-NEXT: ; %bb.2: ; %exit
-; SI-NEXT: s_branch .LBB9_5
-; SI-NEXT: .LBB9_3: ; %bb
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
+; SI-NEXT: s_branch .LBB9_4
+; SI-NEXT: .LBB9_2: ; %bb
; SI-NEXT: ;;#ASMSTART
; SI-NEXT: v_mov_b32_e64 v7, -1
; SI-NEXT: v_nop_e64
@@ -786,12 +785,12 @@ define amdgpu_ps float @test_kill_control_flow_return(i32 inreg %arg) #0 {
; SI-NEXT: v_nop_e64
; SI-NEXT: ;;#ASMEND
; SI-NEXT: v_mov_b32_e32 v0, v7
-; SI-NEXT: s_branch .LBB9_5
-; SI-NEXT: .LBB9_4:
+; SI-NEXT: s_branch .LBB9_4
+; SI-NEXT: .LBB9_3:
; SI-NEXT: s_mov_b64 exec, 0
; SI-NEXT: exp null, off, off, off, off done vm
; SI-NEXT: s_endpgm
-; SI-NEXT: .LBB9_5:
+; SI-NEXT: .LBB9_4:
;
; GFX10-WAVE64-LABEL: test_kill_control_flow_return:
; GFX10-WAVE64: ; %bb.0: ; %entry
@@ -800,15 +799,14 @@ define amdgpu_ps float @test_kill_control_flow_return(i32 inreg %arg) #0 {
; GFX10-WAVE64-NEXT: s_cselect_b64 s[4:5], -1, 0
; GFX10-WAVE64-NEXT: s_andn2_b64 s[4:5], exec, s[4:5]
; GFX10-WAVE64-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
-; GFX10-WAVE64-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX10-WAVE64-NEXT: s_cbranch_scc0 .LBB9_3
; GFX10-WAVE64-NEXT: ; %bb.1: ; %entry
; GFX10-WAVE64-NEXT: s_and_b64 exec, exec, s[2:3]
; GFX10-WAVE64-NEXT: v_mov_b32_e32 v0, 0
; GFX10-WAVE64-NEXT: s_cmp_lg_u32 s0, 0
-; GFX10-WAVE64-NEXT: s_cbranch_scc0 .LBB9_3
-; GFX10-WAVE64-NEXT: ; %bb.2: ; %exit
-; GFX10-WAVE64-NEXT: s_branch .LBB9_5
-; GFX10-WAVE64-NEXT: .LBB9_3: ; %bb
+; GFX10-WAVE64-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX10-WAVE64-NEXT: s_branch .LBB9_4
+; GFX10-WAVE64-NEXT: .LBB9_2: ; %bb
; GFX10-WAVE64-NEXT: ;;#ASMSTART
; GFX10-WAVE64-NEXT: v_mov_b32_e64 v7, -1
; GFX10-WAVE64-NEXT: v_nop_e64
@@ -823,12 +821,12 @@ define amdgpu_ps float @test_kill_control_flow_return(i32 inreg %arg) #0 {
; GFX10-WAVE64-NEXT: v_nop_e64
; GFX10-WAVE64-NEXT: ;;#ASMEND
; GFX10-WAVE64-NEXT: v_mov_b32_e32 v0, v7
-; GFX10-WAVE64-NEXT: s_branch .LBB9_5
-; GFX10-WAVE64-NEXT: .LBB9_4:
+; GFX10-WAVE64-NEXT: s_branch .LBB9_4
+; GFX10-WAVE64-NEXT: .LBB9_3:
; GFX10-WAVE64-NEXT: s_mov_b64 exec, 0
; GFX10-WAVE64-NEXT: exp null, off, off, off, off done vm
; GFX10-WAVE64-NEXT: s_endpgm
-; GFX10-WAVE64-NEXT: .LBB9_5:
+; GFX10-WAVE64-NEXT: .LBB9_4:
;
; GFX10-WAVE32-LABEL: test_kill_control_flow_return:
; GFX10-WAVE32: ; %bb.0: ; %entry
@@ -837,15 +835,14 @@ define amdgpu_ps float @test_kill_control_flow_return(i32 inreg %arg) #0 {
; GFX10-WAVE32-NEXT: s_cselect_b32 s2, -1, 0
; GFX10-WAVE32-NEXT: s_andn2_b32 s2, exec_lo, s2
; GFX10-WAVE32-NEXT: s_andn2_b32 s1, s1, s2
-; GFX10-WAVE32-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX10-WAVE32-NEXT: s_cbranch_scc0 .LBB9_3
; GFX10-WAVE32-NEXT: ; %bb.1: ; %entry
; GFX10-WAVE32-NEXT: s_and_b32 exec_lo, exec_lo, s1
; GFX10-WAVE32-NEXT: v_mov_b32_e32 v0, 0
; GFX10-WAVE32-NEXT: s_cmp_lg_u32 s0, 0
-; GFX10-WAVE32-NEXT: s_cbranch_scc0 .LBB9_3
-; GFX10-WAVE32-NEXT: ; %bb.2: ; %exit
-; GFX10-WAVE32-NEXT: s_branch .LBB9_5
-; GFX10-WAVE32-NEXT: .LBB9_3: ; %bb
+; GFX10-WAVE32-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX10-WAVE32-NEXT: s_branch .LBB9_4
+; GFX10-WAVE32-NEXT: .LBB9_2: ; %bb
; GFX10-WAVE32-NEXT: ;;#ASMSTART
; GFX10-WAVE32-NEXT: v_mov_b32_e64 v7, -1
; GFX10-WAVE32-NEXT: v_nop_e64
@@ -860,12 +857,12 @@ define amdgpu_ps float @test_kill_control_flow_return(i32 inreg %arg) #0 {
; GFX10-WAVE32-NEXT: v_nop_e64
; GFX10-WAVE32-NEXT: ;;#ASMEND
; GFX10-WAVE32-NEXT: v_mov_b32_e32 v0, v7
-; GFX10-WAVE32-NEXT: s_branch .LBB9_5
-; GFX10-WAVE32-NEXT: .LBB9_4:
+; GFX10-WAVE32-NEXT: s_branch .LBB9_4
+; GFX10-WAVE32-NEXT: .LBB9_3:
; GFX10-WAVE32-NEXT: s_mov_b32 exec_lo, 0
; GFX10-WAVE32-NEXT: exp null, off, off, off, off done vm
; GFX10-WAVE32-NEXT: s_endpgm
-; GFX10-WAVE32-NEXT: .LBB9_5:
+; GFX10-WAVE32-NEXT: .LBB9_4:
;
; GFX11-LABEL: test_kill_control_flow_return:
; GFX11: ; %bb.0: ; %entry
@@ -875,15 +872,14 @@ define amdgpu_ps float @test_kill_control_flow_return(i32 inreg %arg) #0 {
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b64 s[4:5], exec, s[4:5]
; GFX11-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[4:5]
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_3
; GFX11-NEXT: ; %bb.1: ; %entry
; GFX11-NEXT: s_and_b64 exec, exec, s[2:3]
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_cmp_lg_u32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_3
-; GFX11-NEXT: ; %bb.2: ; %exit
-; GFX11-NEXT: s_branch .LBB9_5
-; GFX11-NEXT: .LBB9_3: ; %bb
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: s_branch .LBB9_4
+; GFX11-NEXT: .LBB9_2: ; %bb
; GFX11-NEXT: ;;#ASMSTART
; GFX11-NEXT: v_mov_b32_e64 v7, -1
; GFX11-NEXT: v_nop_e64
@@ -898,12 +894,12 @@ define amdgpu_ps float @test_kill_control_flow_return(i32 inreg %arg) #0 {
; GFX11-NEXT: v_nop_e64
; GFX11-NEXT: ;;#ASMEND
; GFX11-NEXT: v_mov_b32_e32 v0, v7
-; GFX11-NEXT: s_branch .LBB9_5
-; GFX11-NEXT: .LBB9_4:
+; GFX11-NEXT: s_branch .LBB9_4
+; GFX11-NEXT: .LBB9_3:
; GFX11-NEXT: s_mov_b64 exec, 0
; GFX11-NEXT: exp mrt0, off, off, off, off done
; GFX11-NEXT: s_endpgm
-; GFX11-NEXT: .LBB9_5:
+; GFX11-NEXT: .LBB9_4:
entry:
%kill = icmp eq i32 %arg, 1
%cmp = icmp eq i32 %arg, 0
diff --git a/llvm/test/CodeGen/AMDGPU/srem.ll b/llvm/test/CodeGen/AMDGPU/srem.ll
index bf6dba48cb921..4c6fa36b8c8ac 100644
--- a/llvm/test/CodeGen/AMDGPU/srem.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem.ll
@@ -1502,7 +1502,7 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; GCN-NEXT: v_readfirstlane_b32 s2, v2
; GCN-NEXT: s_or_b64 s[6:7], s[4:5], s[2:3]
; GCN-NEXT: s_cmp_lg_u32 s7, 0
-; GCN-NEXT: s_cbranch_scc0 .LBB8_4
+; GCN-NEXT: s_cbranch_scc0 .LBB8_2
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_ashr_i32 s6, s3, 31
; GCN-NEXT: s_add_u32 s8, s2, s6
@@ -1653,9 +1653,6 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; GCN-NEXT: v_mov_b32_e32 v1, s9
; GCN-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GCN-NEXT: s_endpgm
-; GCN-NEXT: .LBB8_4:
-; GCN-NEXT: ; implicit-def: $sgpr8_sgpr9
-; GCN-NEXT: s_branch .LBB8_2
;
; TAHITI-LABEL: srem_i64:
; TAHITI: ; %bb.0:
@@ -1669,7 +1666,7 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; TAHITI-NEXT: s_waitcnt vmcnt(0)
; TAHITI-NEXT: v_or_b32_e32 v4, v1, v3
; TAHITI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; TAHITI-NEXT: s_cbranch_vccz .LBB8_4
+; TAHITI-NEXT: s_cbranch_vccz .LBB8_2
; TAHITI-NEXT: ; %bb.1:
; TAHITI-NEXT: v_ashrrev_i32_e32 v5, 31, v3
; TAHITI-NEXT: v_add_i32_e32 v4, vcc, v2, v5
@@ -1813,9 +1810,6 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; TAHITI-NEXT: s_mov_b32 s6, -1
; TAHITI-NEXT: buffer_store_dwordx2 v[3:4], off, s[4:7], 0
; TAHITI-NEXT: s_endpgm
-; TAHITI-NEXT: .LBB8_4:
-; TAHITI-NEXT: ; implicit-def: $vgpr3_vgpr4
-; TAHITI-NEXT: s_branch .LBB8_2
;
; TONGA-LABEL: srem_i64:
; TONGA: ; %bb.0:
@@ -1831,7 +1825,7 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; TONGA-NEXT: v_readfirstlane_b32 s2, v2
; TONGA-NEXT: s_or_b64 s[6:7], s[4:5], s[2:3]
; TONGA-NEXT: s_cmp_lg_u32 s7, 0
-; TONGA-NEXT: s_cbranch_scc0 .LBB8_3
+; TONGA-NEXT: s_cbranch_scc0 .LBB8_2
; TONGA-NEXT: ; %bb.1:
; TONGA-NEXT: s_ashr_i32 s6, s3, 31
; TONGA-NEXT: s_add_u32 s8, s2, s6
@@ -1951,7 +1945,7 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; TONGA-NEXT: s_xor_b64 s[6:7], s[6:7], s[10:11]
; TONGA-NEXT: s_sub_u32 s6, s6, s10
; TONGA-NEXT: s_subb_u32 s7, s7, s10
-; TONGA-NEXT: s_cbranch_execnz .LBB8_4
+; TONGA-NEXT: s_cbranch_execnz .LBB8_3
; TONGA-NEXT: .LBB8_2:
; TONGA-NEXT: v_cvt_f32_u32_e32 v0, s2
; TONGA-NEXT: s_sub_i32 s3, 0, s2
@@ -1971,14 +1965,11 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s2, v0
; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
; TONGA-NEXT: v_mov_b32_e32 v1, 0
-; TONGA-NEXT: s_branch .LBB8_5
+; TONGA-NEXT: s_branch .LBB8_4
; TONGA-NEXT: .LBB8_3:
-; TONGA-NEXT: ; implicit-def: $sgpr6_sgpr7
-; TONGA-NEXT: s_branch .LBB8_2
-; TONGA-NEXT: .LBB8_4:
; TONGA-NEXT: v_mov_b32_e32 v0, s6
; TONGA-NEXT: v_mov_b32_e32 v1, s7
-; TONGA-NEXT: .LBB8_5:
+; TONGA-NEXT: .LBB8_4:
; TONGA-NEXT: v_mov_b32_e32 v2, s0
; TONGA-NEXT: v_mov_b32_e32 v3, s1
; TONGA-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
@@ -2704,7 +2695,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_readfirstlane_b32 s5, v7
; GCN-NEXT: s_cmp_lg_u32 s7, 0
; GCN-NEXT: v_readfirstlane_b32 s4, v6
-; GCN-NEXT: s_cbranch_scc0 .LBB10_6
+; GCN-NEXT: s_cbranch_scc0 .LBB10_2
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_ashr_i32 s6, s9, 31
; GCN-NEXT: s_add_u32 s12, s8, s6
@@ -2852,7 +2843,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: .LBB10_3:
; GCN-NEXT: s_or_b64 s[8:9], s[4:5], s[2:3]
; GCN-NEXT: s_cmp_lg_u32 s9, 0
-; GCN-NEXT: s_cbranch_scc0 .LBB10_7
+; GCN-NEXT: s_cbranch_scc0 .LBB10_5
; GCN-NEXT: ; %bb.4:
; GCN-NEXT: s_ashr_i32 s8, s3, 31
; GCN-NEXT: s_add_u32 s10, s2, s8
@@ -2976,7 +2967,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: s_xor_b64 s[10:11], s[10:11], s[12:13]
; GCN-NEXT: s_sub_u32 s10, s10, s12
; GCN-NEXT: s_subb_u32 s11, s11, s12
-; GCN-NEXT: s_cbranch_execnz .LBB10_8
+; GCN-NEXT: s_cbranch_execnz .LBB10_6
; GCN-NEXT: .LBB10_5:
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s2
; GCN-NEXT: s_sub_i32 s3, 0, s2
@@ -2996,17 +2987,11 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_subrev_u32_e32 v1, s2, v0
; GCN-NEXT: v_cmp_le_u32_e32 vcc, s2, v0
; GCN-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-NEXT: s_branch .LBB10_9
+; GCN-NEXT: s_branch .LBB10_7
; GCN-NEXT: .LBB10_6:
-; GCN-NEXT: ; implicit-def: $sgpr6_sgpr7
-; GCN-NEXT: s_branch .LBB10_2
-; GCN-NEXT: .LBB10_7:
-; GCN-NEXT: ; implicit-def: $sgpr10_sgpr11
-; GCN-NEXT: s_branch .LBB10_5
-; GCN-NEXT: .LBB10_8:
; GCN-NEXT: v_mov_b32_e32 v2, s10
; GCN-NEXT: v_mov_b32_e32 v3, s11
-; GCN-NEXT: .LBB10_9:
+; GCN-NEXT: .LBB10_7:
; GCN-NEXT: v_mov_b32_e32 v4, 0
; GCN-NEXT: v_mov_b32_e32 v0, s6
; GCN-NEXT: v_mov_b32_e32 v1, s7
@@ -3026,7 +3011,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: s_waitcnt vmcnt(0)
; TAHITI-NEXT: v_or_b32_e32 v8, v5, v1
; TAHITI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
-; TAHITI-NEXT: s_cbranch_vccz .LBB10_7
+; TAHITI-NEXT: s_cbranch_vccz .LBB10_2
; TAHITI-NEXT: ; %bb.1:
; TAHITI-NEXT: v_ashrrev_i32_e32 v9, 31, v1
; TAHITI-NEXT: v_add_i32_e32 v8, vcc, v0, v9
@@ -3168,7 +3153,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: .LBB10_3:
; TAHITI-NEXT: v_or_b32_e32 v0, v7, v3
; TAHITI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; TAHITI-NEXT: s_cbranch_vccz .LBB10_8
+; TAHITI-NEXT: s_cbranch_vccz .LBB10_5
; TAHITI-NEXT: ; %bb.4:
; TAHITI-NEXT: v_ashrrev_i32_e32 v0, 31, v3
; TAHITI-NEXT: v_add_i32_e32 v1, vcc, v2, v0
@@ -3312,11 +3297,6 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: s_mov_b32 s6, -1
; TAHITI-NEXT: buffer_store_dwordx4 v[8:11], off, s[4:7], 0
; TAHITI-NEXT: s_endpgm
-; TAHITI-NEXT: .LBB10_7:
-; TAHITI-NEXT: ; implicit-def: $vgpr8_vgpr9
-; TAHITI-NEXT: s_branch .LBB10_2
-; TAHITI-NEXT: .LBB10_8:
-; TAHITI-NEXT: s_branch .LBB10_5
;
; TONGA-LABEL: srem_v2i64:
; TONGA: ; %bb.0:
@@ -3338,7 +3318,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_readfirstlane_b32 s2, v4
; TONGA-NEXT: s_or_b64 s[6:7], s[2:3], s[0:1]
; TONGA-NEXT: s_cmp_lg_u32 s7, 0
-; TONGA-NEXT: s_cbranch_scc0 .LBB10_3
+; TONGA-NEXT: s_cbranch_scc0 .LBB10_2
; TONGA-NEXT: ; %bb.1:
; TONGA-NEXT: s_ashr_i32 s6, s1, 31
; TONGA-NEXT: s_add_u32 s8, s0, s6
@@ -3458,7 +3438,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: s_xor_b64 s[6:7], s[6:7], s[10:11]
; TONGA-NEXT: s_sub_u32 s6, s6, s10
; TONGA-NEXT: s_subb_u32 s7, s7, s10
-; TONGA-NEXT: s_cbranch_execnz .LBB10_4
+; TONGA-NEXT: s_cbranch_execnz .LBB10_3
; TONGA-NEXT: .LBB10_2:
; TONGA-NEXT: v_cvt_f32_u32_e32 v0, s0
; TONGA-NEXT: s_sub_i32 s1, 0, s0
@@ -3478,18 +3458,15 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, s0, v0
; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s0, v0
; TONGA-NEXT: v_cndmask_b32_e32 v8, v0, v1, vcc
-; TONGA-NEXT: s_branch .LBB10_5
+; TONGA-NEXT: s_branch .LBB10_4
; TONGA-NEXT: .LBB10_3:
-; TONGA-NEXT: ; implicit-def: $sgpr6_sgpr7
-; TONGA-NEXT: s_branch .LBB10_2
-; TONGA-NEXT: .LBB10_4:
; TONGA-NEXT: v_mov_b32_e32 v9, s7
; TONGA-NEXT: v_mov_b32_e32 v8, s6
-; TONGA-NEXT: .LBB10_5:
+; TONGA-NEXT: .LBB10_4:
; TONGA-NEXT: v_or_b32_e32 v0, v7, v3
; TONGA-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; TONGA-NEXT: s_cbranch_vccz .LBB10_9
-; TONGA-NEXT: ; %bb.6:
+; TONGA-NEXT: s_cbranch_vccz .LBB10_6
+; TONGA-NEXT: ; %bb.5:
; TONGA-NEXT: v_ashrrev_i32_e32 v0, 31, v3
; TONGA-NEXT: v_add_u32_e32 v1, vcc, v2, v0
; TONGA-NEXT: v_addc_u32_e32 v3, vcc, v3, v0, vcc
@@ -3595,8 +3572,8 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_xor_b32_e32 v1, v1, v11
; TONGA-NEXT: v_sub_u32_e32 v10, vcc, v0, v11
; TONGA-NEXT: v_subb_u32_e32 v11, vcc, v1, v11, vcc
-; TONGA-NEXT: s_cbranch_execnz .LBB10_8
-; TONGA-NEXT: .LBB10_7:
+; TONGA-NEXT: s_cbranch_execnz .LBB10_7
+; TONGA-NEXT: .LBB10_6:
; TONGA-NEXT: v_cvt_f32_u32_e32 v0, v2
; TONGA-NEXT: v_sub_u32_e32 v1, vcc, 0, v2
; TONGA-NEXT: v_mov_b32_e32 v11, 0
@@ -3615,13 +3592,11 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, v2, v0
; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
; TONGA-NEXT: v_cndmask_b32_e32 v10, v0, v1, vcc
-; TONGA-NEXT: .LBB10_8:
+; TONGA-NEXT: .LBB10_7:
; TONGA-NEXT: v_mov_b32_e32 v0, s4
; TONGA-NEXT: v_mov_b32_e32 v1, s5
; TONGA-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
; TONGA-NEXT: s_endpgm
-; TONGA-NEXT: .LBB10_9:
-; TONGA-NEXT: s_branch .LBB10_7
;
; EG-LABEL: srem_v2i64:
; EG: ; %bb.0:
@@ -4882,7 +4857,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_readfirstlane_b32 s15, v15
; GCN-NEXT: s_cmp_lg_u32 s7, 0
; GCN-NEXT: v_readfirstlane_b32 s14, v14
-; GCN-NEXT: s_cbranch_scc0 .LBB12_6
+; GCN-NEXT: s_cbranch_scc0 .LBB12_2
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_ashr_i32 s6, s17, 31
; GCN-NEXT: s_add_u32 s20, s16, s6
@@ -5030,7 +5005,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: .LBB12_3:
; GCN-NEXT: s_or_b64 s[16:17], s[14:15], s[12:13]
; GCN-NEXT: s_cmp_lg_u32 s17, 0
-; GCN-NEXT: s_cbranch_scc0 .LBB12_7
+; GCN-NEXT: s_cbranch_scc0 .LBB12_5
; GCN-NEXT: ; %bb.4:
; GCN-NEXT: s_ashr_i32 s16, s13, 31
; GCN-NEXT: s_add_u32 s18, s12, s16
@@ -5154,7 +5129,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: s_xor_b64 s[18:19], s[18:19], s[20:21]
; GCN-NEXT: s_sub_u32 s18, s18, s20
; GCN-NEXT: s_subb_u32 s19, s19, s20
-; GCN-NEXT: s_cbranch_execnz .LBB12_8
+; GCN-NEXT: s_cbranch_execnz .LBB12_6
; GCN-NEXT: .LBB12_5:
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s12
; GCN-NEXT: s_sub_i32 s13, 0, s12
@@ -5174,21 +5149,15 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_subrev_u32_e32 v1, s12, v0
; GCN-NEXT: v_cmp_le_u32_e32 vcc, s12, v0
; GCN-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-NEXT: s_branch .LBB12_9
+; GCN-NEXT: s_branch .LBB12_7
; GCN-NEXT: .LBB12_6:
-; GCN-NEXT: ; implicit-def: $sgpr6_sgpr7
-; GCN-NEXT: s_branch .LBB12_2
-; GCN-NEXT: .LBB12_7:
-; GCN-NEXT: ; implicit-def: $sgpr18_sgpr19
-; GCN-NEXT: s_branch .LBB12_5
-; GCN-NEXT: .LBB12_8:
; GCN-NEXT: v_mov_b32_e32 v2, s18
; GCN-NEXT: v_mov_b32_e32 v3, s19
-; GCN-NEXT: .LBB12_9:
+; GCN-NEXT: .LBB12_7:
; GCN-NEXT: s_or_b64 s[12:13], s[10:11], s[8:9]
; GCN-NEXT: s_cmp_lg_u32 s13, 0
-; GCN-NEXT: s_cbranch_scc0 .LBB12_12
-; GCN-NEXT: ; %bb.10:
+; GCN-NEXT: s_cbranch_scc0 .LBB12_9
+; GCN-NEXT: ; %bb.8:
; GCN-NEXT: s_ashr_i32 s12, s9, 31
; GCN-NEXT: s_add_u32 s14, s8, s12
; GCN-NEXT: s_mov_b32 s13, s12
@@ -5311,8 +5280,8 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: s_xor_b64 s[14:15], s[14:15], s[16:17]
; GCN-NEXT: s_sub_u32 s14, s14, s16
; GCN-NEXT: s_subb_u32 s15, s15, s16
-; GCN-NEXT: s_cbranch_execnz .LBB12_13
-; GCN-NEXT: .LBB12_11:
+; GCN-NEXT: s_cbranch_execnz .LBB12_10
+; GCN-NEXT: .LBB12_9:
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s8
; GCN-NEXT: s_sub_i32 s9, 0, s8
; GCN-NEXT: v_mov_b32_e32 v5, 0
@@ -5331,18 +5300,15 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_subrev_u32_e32 v1, s8, v0
; GCN-NEXT: v_cmp_le_u32_e32 vcc, s8, v0
; GCN-NEXT: v_cndmask_b32_e32 v4, v0, v1, vcc
-; GCN-NEXT: s_branch .LBB12_14
-; GCN-NEXT: .LBB12_12:
-; GCN-NEXT: ; implicit-def: $sgpr14_sgpr15
; GCN-NEXT: s_branch .LBB12_11
-; GCN-NEXT: .LBB12_13:
+; GCN-NEXT: .LBB12_10:
; GCN-NEXT: v_mov_b32_e32 v4, s14
; GCN-NEXT: v_mov_b32_e32 v5, s15
-; GCN-NEXT: .LBB12_14:
+; GCN-NEXT: .LBB12_11:
; GCN-NEXT: s_or_b64 s[8:9], s[4:5], s[2:3]
; GCN-NEXT: s_cmp_lg_u32 s9, 0
-; GCN-NEXT: s_cbranch_scc0 .LBB12_17
-; GCN-NEXT: ; %bb.15:
+; GCN-NEXT: s_cbranch_scc0 .LBB12_13
+; GCN-NEXT: ; %bb.12:
; GCN-NEXT: s_ashr_i32 s8, s3, 31
; GCN-NEXT: s_add_u32 s10, s2, s8
; GCN-NEXT: s_mov_b32 s9, s8
@@ -5465,8 +5431,8 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: s_xor_b64 s[10:11], s[10:11], s[12:13]
; GCN-NEXT: s_sub_u32 s10, s10, s12
; GCN-NEXT: s_subb_u32 s11, s11, s12
-; GCN-NEXT: s_cbranch_execnz .LBB12_18
-; GCN-NEXT: .LBB12_16:
+; GCN-NEXT: s_cbranch_execnz .LBB12_14
+; GCN-NEXT: .LBB12_13:
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s2
; GCN-NEXT: s_sub_i32 s3, 0, s2
; GCN-NEXT: v_mov_b32_e32 v7, 0
@@ -5485,14 +5451,11 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_subrev_u32_e32 v1, s2, v0
; GCN-NEXT: v_cmp_le_u32_e32 vcc, s2, v0
; GCN-NEXT: v_cndmask_b32_e32 v6, v0, v1, vcc
-; GCN-NEXT: s_branch .LBB12_19
-; GCN-NEXT: .LBB12_17:
-; GCN-NEXT: ; implicit-def: $sgpr10_sgpr11
-; GCN-NEXT: s_branch .LBB12_16
-; GCN-NEXT: .LBB12_18:
+; GCN-NEXT: s_branch .LBB12_15
+; GCN-NEXT: .LBB12_14:
; GCN-NEXT: v_mov_b32_e32 v6, s10
; GCN-NEXT: v_mov_b32_e32 v7, s11
-; GCN-NEXT: .LBB12_19:
+; GCN-NEXT: .LBB12_15:
; GCN-NEXT: v_mov_b32_e32 v8, 0
; GCN-NEXT: v_mov_b32_e32 v0, s6
; GCN-NEXT: v_mov_b32_e32 v1, s7
@@ -5515,7 +5478,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: s_waitcnt vmcnt(2)
; TAHITI-NEXT: v_or_b32_e32 v8, v15, v11
; TAHITI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
-; TAHITI-NEXT: s_cbranch_vccz .LBB12_13
+; TAHITI-NEXT: s_cbranch_vccz .LBB12_2
; TAHITI-NEXT: ; %bb.1:
; TAHITI-NEXT: v_ashrrev_i32_e32 v8, 31, v11
; TAHITI-NEXT: v_add_i32_e32 v9, vcc, v10, v8
@@ -5657,7 +5620,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: .LBB12_3:
; TAHITI-NEXT: v_or_b32_e32 v10, v17, v13
; TAHITI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
-; TAHITI-NEXT: s_cbranch_vccz .LBB12_14
+; TAHITI-NEXT: s_cbranch_vccz .LBB12_5
; TAHITI-NEXT: ; %bb.4:
; TAHITI-NEXT: v_ashrrev_i32_e32 v10, 31, v13
; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v12, v10
@@ -5800,7 +5763,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: s_waitcnt vmcnt(0)
; TAHITI-NEXT: v_or_b32_e32 v12, v5, v1
; TAHITI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
-; TAHITI-NEXT: s_cbranch_vccz .LBB12_15
+; TAHITI-NEXT: s_cbranch_vccz .LBB12_8
; TAHITI-NEXT: ; %bb.7:
; TAHITI-NEXT: v_ashrrev_i32_e32 v13, 31, v1
; TAHITI-NEXT: v_add_i32_e32 v12, vcc, v0, v13
@@ -5942,7 +5905,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: .LBB12_9:
; TAHITI-NEXT: v_or_b32_e32 v0, v7, v3
; TAHITI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; TAHITI-NEXT: s_cbranch_vccz .LBB12_16
+; TAHITI-NEXT: s_cbranch_vccz .LBB12_11
; TAHITI-NEXT: ; %bb.10:
; TAHITI-NEXT: v_ashrrev_i32_e32 v0, 31, v3
; TAHITI-NEXT: v_add_i32_e32 v1, vcc, v2, v0
@@ -6087,16 +6050,6 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: buffer_store_dwordx4 v[12:15], off, s[4:7], 0 offset:16
; TAHITI-NEXT: buffer_store_dwordx4 v[8:11], off, s[4:7], 0
; TAHITI-NEXT: s_endpgm
-; TAHITI-NEXT: .LBB12_13:
-; TAHITI-NEXT: ; implicit-def: $vgpr8_vgpr9
-; TAHITI-NEXT: s_branch .LBB12_2
-; TAHITI-NEXT: .LBB12_14:
-; TAHITI-NEXT: s_branch .LBB12_5
-; TAHITI-NEXT: .LBB12_15:
-; TAHITI-NEXT: ; implicit-def: $vgpr12_vgpr13
-; TAHITI-NEXT: s_branch .LBB12_8
-; TAHITI-NEXT: .LBB12_16:
-; TAHITI-NEXT: s_branch .LBB12_11
;
; TONGA-LABEL: srem_v4i64:
; TONGA: ; %bb.0:
@@ -6128,7 +6081,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_readfirstlane_b32 s0, v10
; TONGA-NEXT: s_or_b64 s[6:7], s[2:3], s[0:1]
; TONGA-NEXT: s_cmp_lg_u32 s7, 0
-; TONGA-NEXT: s_cbranch_scc0 .LBB12_3
+; TONGA-NEXT: s_cbranch_scc0 .LBB12_2
; TONGA-NEXT: ; %bb.1:
; TONGA-NEXT: s_ashr_i32 s6, s1, 31
; TONGA-NEXT: s_add_u32 s8, s0, s6
@@ -6248,7 +6201,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: s_xor_b64 s[6:7], s[6:7], s[10:11]
; TONGA-NEXT: s_sub_u32 s6, s6, s10
; TONGA-NEXT: s_subb_u32 s7, s7, s10
-; TONGA-NEXT: s_cbranch_execnz .LBB12_4
+; TONGA-NEXT: s_cbranch_execnz .LBB12_3
; TONGA-NEXT: .LBB12_2:
; TONGA-NEXT: v_cvt_f32_u32_e32 v8, s0
; TONGA-NEXT: s_sub_i32 s1, 0, s0
@@ -6268,18 +6221,15 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s0, v8
; TONGA-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
; TONGA-NEXT: v_mov_b32_e32 v9, 0
-; TONGA-NEXT: s_branch .LBB12_5
+; TONGA-NEXT: s_branch .LBB12_4
; TONGA-NEXT: .LBB12_3:
-; TONGA-NEXT: ; implicit-def: $sgpr6_sgpr7
-; TONGA-NEXT: s_branch .LBB12_2
-; TONGA-NEXT: .LBB12_4:
; TONGA-NEXT: v_mov_b32_e32 v9, s7
; TONGA-NEXT: v_mov_b32_e32 v8, s6
-; TONGA-NEXT: .LBB12_5:
+; TONGA-NEXT: .LBB12_4:
; TONGA-NEXT: v_or_b32_e32 v10, v17, v13
; TONGA-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
-; TONGA-NEXT: s_cbranch_vccz .LBB12_15
-; TONGA-NEXT: ; %bb.6:
+; TONGA-NEXT: s_cbranch_vccz .LBB12_6
+; TONGA-NEXT: ; %bb.5:
; TONGA-NEXT: v_ashrrev_i32_e32 v10, 31, v13
; TONGA-NEXT: v_add_u32_e32 v11, vcc, v12, v10
; TONGA-NEXT: v_addc_u32_e32 v13, vcc, v13, v10, vcc
@@ -6385,8 +6335,8 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_xor_b32_e32 v13, v10, v19
; TONGA-NEXT: v_sub_u32_e32 v10, vcc, v11, v19
; TONGA-NEXT: v_subb_u32_e32 v11, vcc, v13, v19, vcc
-; TONGA-NEXT: s_cbranch_execnz .LBB12_8
-; TONGA-NEXT: .LBB12_7:
+; TONGA-NEXT: s_cbranch_execnz .LBB12_7
+; TONGA-NEXT: .LBB12_6:
; TONGA-NEXT: v_cvt_f32_u32_e32 v10, v12
; TONGA-NEXT: v_sub_u32_e32 v11, vcc, 0, v12
; TONGA-NEXT: v_rcp_iflag_f32_e32 v10, v10
@@ -6405,12 +6355,12 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v10, v12
; TONGA-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
; TONGA-NEXT: v_mov_b32_e32 v11, 0
-; TONGA-NEXT: .LBB12_8:
+; TONGA-NEXT: .LBB12_7:
; TONGA-NEXT: s_waitcnt vmcnt(0)
; TONGA-NEXT: v_or_b32_e32 v12, v5, v1
; TONGA-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
-; TONGA-NEXT: s_cbranch_vccz .LBB12_16
-; TONGA-NEXT: ; %bb.9:
+; TONGA-NEXT: s_cbranch_vccz .LBB12_9
+; TONGA-NEXT: ; %bb.8:
; TONGA-NEXT: v_ashrrev_i32_e32 v12, 31, v1
; TONGA-NEXT: v_add_u32_e32 v13, vcc, v0, v12
; TONGA-NEXT: v_addc_u32_e32 v1, vcc, v1, v12, vcc
@@ -6516,8 +6466,8 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_xor_b32_e32 v1, v1, v16
; TONGA-NEXT: v_sub_u32_e32 v12, vcc, v5, v16
; TONGA-NEXT: v_subb_u32_e32 v13, vcc, v1, v16, vcc
-; TONGA-NEXT: s_cbranch_execnz .LBB12_11
-; TONGA-NEXT: .LBB12_10:
+; TONGA-NEXT: s_cbranch_execnz .LBB12_10
+; TONGA-NEXT: .LBB12_9:
; TONGA-NEXT: v_cvt_f32_u32_e32 v1, v0
; TONGA-NEXT: v_sub_u32_e32 v5, vcc, 0, v0
; TONGA-NEXT: v_mov_b32_e32 v13, 0
@@ -6536,11 +6486,11 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_subrev_u32_e32 v4, vcc, v0, v1
; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0
; TONGA-NEXT: v_cndmask_b32_e32 v12, v1, v4, vcc
-; TONGA-NEXT: .LBB12_11:
+; TONGA-NEXT: .LBB12_10:
; TONGA-NEXT: v_or_b32_e32 v0, v7, v3
; TONGA-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; TONGA-NEXT: s_cbranch_vccz .LBB12_17
-; TONGA-NEXT: ; %bb.12:
+; TONGA-NEXT: s_cbranch_vccz .LBB12_12
+; TONGA-NEXT: ; %bb.11:
; TONGA-NEXT: v_ashrrev_i32_e32 v0, 31, v3
; TONGA-NEXT: v_add_u32_e32 v1, vcc, v2, v0
; TONGA-NEXT: v_addc_u32_e32 v3, vcc, v3, v0, vcc
@@ -6646,8 +6596,8 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_xor_b32_e32 v1, v1, v15
; TONGA-NEXT: v_sub_u32_e32 v14, vcc, v0, v15
; TONGA-NEXT: v_subb_u32_e32 v15, vcc, v1, v15, vcc
-; TONGA-NEXT: s_cbranch_execnz .LBB12_14
-; TONGA-NEXT: .LBB12_13:
+; TONGA-NEXT: s_cbranch_execnz .LBB12_13
+; TONGA-NEXT: .LBB12_12:
; TONGA-NEXT: v_cvt_f32_u32_e32 v0, v2
; TONGA-NEXT: v_sub_u32_e32 v1, vcc, 0, v2
; TONGA-NEXT: v_mov_b32_e32 v15, 0
@@ -6666,7 +6616,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, v2, v0
; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
; TONGA-NEXT: v_cndmask_b32_e32 v14, v0, v1, vcc
-; TONGA-NEXT: .LBB12_14:
+; TONGA-NEXT: .LBB12_13:
; TONGA-NEXT: v_mov_b32_e32 v0, s4
; TONGA-NEXT: v_mov_b32_e32 v1, s5
; TONGA-NEXT: s_add_u32 s0, s4, 16
@@ -6676,13 +6626,6 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_mov_b32_e32 v1, s1
; TONGA-NEXT: flat_store_dwordx4 v[0:1], v[12:15]
; TONGA-NEXT: s_endpgm
-; TONGA-NEXT: .LBB12_15:
-; TONGA-NEXT: s_branch .LBB12_7
-; TONGA-NEXT: .LBB12_16:
-; TONGA-NEXT: ; implicit-def: $vgpr12_vgpr13
-; TONGA-NEXT: s_branch .LBB12_10
-; TONGA-NEXT: .LBB12_17:
-; TONGA-NEXT: s_branch .LBB12_13
;
; EG-LABEL: srem_v4i64:
; EG: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/transform-block-with-return-to-epilog.ll b/llvm/test/CodeGen/AMDGPU/transform-block-with-return-to-epilog.ll
index 77d62a3a9a8cd..30c9aaf61e3be 100644
--- a/llvm/test/CodeGen/AMDGPU/transform-block-with-return-to-epilog.ll
+++ b/llvm/test/CodeGen/AMDGPU/transform-block-with-return-to-epilog.ll
@@ -15,16 +15,11 @@ entry:
define amdgpu_ps float @test_return_to_epilog_into_end_block(i32 inreg %a, float %b) #0 {
; GCN-LABEL: name: test_return_to_epilog_into_end_block
; GCN: bb.0.entry:
- ; GCN-NEXT: successors: %bb.1(0x80000000), %bb.2(0x00000000)
+ ; GCN-NEXT: successors: %bb.3(0x80000000), %bb.2(0x00000000)
; GCN-NEXT: liveins: $sgpr2, $vgpr0
; GCN-NEXT: {{ $}}
; GCN-NEXT: S_CMP_LT_I32 killed renamable $sgpr2, 1, implicit-def $scc
; GCN-NEXT: S_CBRANCH_SCC1 %bb.2, implicit killed $scc
- ; GCN-NEXT: {{ $}}
- ; GCN-NEXT: bb.1.if:
- ; GCN-NEXT: successors: %bb.3(0x80000000)
- ; GCN-NEXT: liveins: $vgpr0
- ; GCN-NEXT: {{ $}}
; GCN-NEXT: S_BRANCH %bb.3
; GCN-NEXT: {{ $}}
; GCN-NEXT: bb.2.else:
@@ -48,16 +43,11 @@ else: ; preds = %entry
define amdgpu_ps float @test_unify_return_to_epilog_into_end_block(i32 inreg %a, i32 inreg %b, float %c, float %d) #0 {
; GCN-LABEL: name: test_unify_return_to_epilog_into_end_block
; GCN: bb.0.entry:
- ; GCN-NEXT: successors: %bb.1(0x50000000), %bb.2(0x30000000)
+ ; GCN-NEXT: successors: %bb.5(0x50000000), %bb.2(0x30000000)
; GCN-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1
; GCN-NEXT: {{ $}}
; GCN-NEXT: S_CMP_LT_I32 killed renamable $sgpr2, 1, implicit-def $scc
; GCN-NEXT: S_CBRANCH_SCC1 %bb.2, implicit killed $scc
- ; GCN-NEXT: {{ $}}
- ; GCN-NEXT: bb.1.if:
- ; GCN-NEXT: successors: %bb.5(0x80000000)
- ; GCN-NEXT: liveins: $vgpr0
- ; GCN-NEXT: {{ $}}
; GCN-NEXT: S_BRANCH %bb.5
; GCN-NEXT: {{ $}}
; GCN-NEXT: bb.2.else.if.cond:
diff --git a/llvm/test/CodeGen/AMDGPU/wave32.ll b/llvm/test/CodeGen/AMDGPU/wave32.ll
index 06f28f05c20be..6308b2efaa66e 100644
--- a/llvm/test/CodeGen/AMDGPU/wave32.ll
+++ b/llvm/test/CodeGen/AMDGPU/wave32.ll
@@ -518,7 +518,7 @@ define amdgpu_kernel void @test_loop_with_if_else_break(ptr addrspace(1) %arg) #
; GFX1032-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_mov_b32 s2, 0
; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX1032-NEXT: s_cbranch_execz .LBB11_6
+; GFX1032-NEXT: s_cbranch_execz .LBB11_5
; GFX1032-NEXT: ; %bb.1: ; %.preheader
; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX1032-NEXT: v_min_u32_e32 v1, 0x100, v0
@@ -541,7 +541,7 @@ define amdgpu_kernel void @test_loop_with_if_else_break(ptr addrspace(1) %arg) #
; GFX1032-NEXT: s_and_b32 s5, exec_lo, s4
; GFX1032-NEXT: s_or_b32 s2, s5, s2
; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
-; GFX1032-NEXT: s_cbranch_execz .LBB11_6
+; GFX1032-NEXT: s_cbranch_execz .LBB11_5
; GFX1032-NEXT: .LBB11_4: ; %bb2
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
@@ -550,11 +550,8 @@ define amdgpu_kernel void @test_loop_with_if_else_break(ptr addrspace(1) %arg) #
; GFX1032-NEXT: s_waitcnt vmcnt(0)
; GFX1032-NEXT: v_cmp_gt_i32_e32 vcc_lo, 11, v3
; GFX1032-NEXT: s_cbranch_vccz .LBB11_2
-; GFX1032-NEXT: ; %bb.5: ; in Loop: Header=BB11_4 Depth=1
-; GFX1032-NEXT: ; implicit-def: $sgpr3
-; GFX1032-NEXT: ; implicit-def: $sgpr0_sgpr1
; GFX1032-NEXT: s_branch .LBB11_3
-; GFX1032-NEXT: .LBB11_6: ; %.loopexit
+; GFX1032-NEXT: .LBB11_5: ; %.loopexit
; GFX1032-NEXT: s_endpgm
;
; GFX1064-LABEL: test_loop_with_if_else_break:
@@ -562,7 +559,7 @@ define amdgpu_kernel void @test_loop_with_if_else_break(ptr addrspace(1) %arg) #
; GFX1064-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX1064-NEXT: s_mov_b32 s6, 0
; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX1064-NEXT: s_cbranch_execz .LBB11_6
+; GFX1064-NEXT: s_cbranch_execz .LBB11_5
; GFX1064-NEXT: ; %bb.1: ; %.preheader
; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX1064-NEXT: v_min_u32_e32 v1, 0x100, v0
@@ -585,7 +582,7 @@ define amdgpu_kernel void @test_loop_with_if_else_break(ptr addrspace(1) %arg) #
; GFX1064-NEXT: s_and_b64 s[8:9], exec, s[4:5]
; GFX1064-NEXT: s_or_b64 s[2:3], s[8:9], s[2:3]
; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX1064-NEXT: s_cbranch_execz .LBB11_6
+; GFX1064-NEXT: s_cbranch_execz .LBB11_5
; GFX1064-NEXT: .LBB11_4: ; %bb2
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
@@ -594,11 +591,8 @@ define amdgpu_kernel void @test_loop_with_if_else_break(ptr addrspace(1) %arg) #
; GFX1064-NEXT: s_waitcnt vmcnt(0)
; GFX1064-NEXT: v_cmp_gt_i32_e32 vcc, 11, v3
; GFX1064-NEXT: s_cbranch_vccz .LBB11_2
-; GFX1064-NEXT: ; %bb.5: ; in Loop: Header=BB11_4 Depth=1
-; GFX1064-NEXT: ; implicit-def: $sgpr6
-; GFX1064-NEXT: ; implicit-def: $sgpr0_sgpr1
; GFX1064-NEXT: s_branch .LBB11_3
-; GFX1064-NEXT: .LBB11_6: ; %.loopexit
+; GFX1064-NEXT: .LBB11_5: ; %.loopexit
; GFX1064-NEXT: s_endpgm
bb:
%tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
@@ -737,7 +731,7 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032-NEXT: s_or_b64 s[4:5], s[2:3], s[0:1]
; GFX1032-NEXT: s_cmp_lg_u32 s5, 0
-; GFX1032-NEXT: s_cbranch_scc0 .LBB15_4
+; GFX1032-NEXT: s_cbranch_scc0 .LBB15_2
; GFX1032-NEXT: ; %bb.1:
; GFX1032-NEXT: v_cvt_f32_u32_e32 v0, s0
; GFX1032-NEXT: v_cvt_f32_u32_e32 v1, s1
@@ -877,9 +871,6 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
; GFX1032-NEXT: v_mov_b32_e32 v1, s5
; GFX1032-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7] offset:16
; GFX1032-NEXT: s_endpgm
-; GFX1032-NEXT: .LBB15_4:
-; GFX1032-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX1032-NEXT: s_branch .LBB15_2
;
; GFX1064-LABEL: test_udiv64:
; GFX1064: ; %bb.0: ; %bb
@@ -889,7 +880,7 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064-NEXT: s_or_b64 s[4:5], s[2:3], s[0:1]
; GFX1064-NEXT: s_cmp_lg_u32 s5, 0
-; GFX1064-NEXT: s_cbranch_scc0 .LBB15_4
+; GFX1064-NEXT: s_cbranch_scc0 .LBB15_2
; GFX1064-NEXT: ; %bb.1:
; GFX1064-NEXT: v_cvt_f32_u32_e32 v0, s0
; GFX1064-NEXT: v_cvt_f32_u32_e32 v1, s1
@@ -1028,9 +1019,6 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
; GFX1064-NEXT: v_mov_b32_e32 v1, s5
; GFX1064-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7] offset:16
; GFX1064-NEXT: s_endpgm
-; GFX1064-NEXT: .LBB15_4:
-; GFX1064-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX1064-NEXT: s_branch .LBB15_2
bb:
%tmp = getelementptr inbounds i64, ptr addrspace(1) %arg, i64 1
%tmp1 = load i64, ptr addrspace(1) %tmp, align 8
@@ -2278,30 +2266,22 @@ define amdgpu_kernel void @test_branch_true() #2 {
; GFX1032: ; %bb.0: ; %entry
; GFX1032-NEXT: s_mov_b32 vcc_lo, exec_lo
; GFX1032-NEXT: s_cbranch_execnz .LBB45_2
-; GFX1032-NEXT: ; %bb.1: ; %for.body.lr.ph
-; GFX1032-NEXT: s_branch .LBB45_3
-; GFX1032-NEXT: .LBB45_2: ; %Flow
-; GFX1032-NEXT: s_branch .LBB45_5
-; GFX1032-NEXT: .LBB45_3: ; %for.body
+; GFX1032-NEXT: s_branch .LBB45_1
+; GFX1032-NEXT: .LBB45_1: ; %for.body
; GFX1032-NEXT: s_mov_b32 vcc_lo, 0
-; GFX1032-NEXT: ; %bb.4: ; %for.end.loopexit
; GFX1032-NEXT: s_branch .LBB45_2
-; GFX1032-NEXT: .LBB45_5: ; %for.end
+; GFX1032-NEXT: .LBB45_2: ; %for.end
; GFX1032-NEXT: s_endpgm
;
; GFX1064-LABEL: test_branch_true:
; GFX1064: ; %bb.0: ; %entry
; GFX1064-NEXT: s_mov_b64 vcc, exec
; GFX1064-NEXT: s_cbranch_execnz .LBB45_2
-; GFX1064-NEXT: ; %bb.1: ; %for.body.lr.ph
-; GFX1064-NEXT: s_branch .LBB45_3
-; GFX1064-NEXT: .LBB45_2: ; %Flow
-; GFX1064-NEXT: s_branch .LBB45_5
-; GFX1064-NEXT: .LBB45_3: ; %for.body
+; GFX1064-NEXT: s_branch .LBB45_1
+; GFX1064-NEXT: .LBB45_1: ; %for.body
; GFX1064-NEXT: s_mov_b64 vcc, 0
-; GFX1064-NEXT: ; %bb.4: ; %for.end.loopexit
; GFX1064-NEXT: s_branch .LBB45_2
-; GFX1064-NEXT: .LBB45_5: ; %for.end
+; GFX1064-NEXT: .LBB45_2: ; %for.end
; GFX1064-NEXT: s_endpgm
entry:
br i1 true, label %for.end, label %for.body.lr.ph
diff --git a/llvm/test/CodeGen/AMDGPU/xor.ll b/llvm/test/CodeGen/AMDGPU/xor.ll
index 92280b9ad8acf..5640b66eb3720 100644
--- a/llvm/test/CodeGen/AMDGPU/xor.ll
+++ b/llvm/test/CodeGen/AMDGPU/xor.ll
@@ -552,7 +552,7 @@ define amdgpu_kernel void @xor_cf(ptr addrspace(1) %out, ptr addrspace(1) %in, i
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u64_e64 s[10:11], s[4:5], 0
; SI-NEXT: s_and_b64 vcc, exec, s[10:11]
-; SI-NEXT: s_cbranch_vccz .LBB14_4
+; SI-NEXT: s_cbranch_vccz .LBB14_2
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: s_mov_b32 s15, 0xf000
; SI-NEXT: s_mov_b32 s14, -1
@@ -572,9 +572,6 @@ define amdgpu_kernel void @xor_cf(ptr addrspace(1) %out, ptr addrspace(1) %in, i
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
-; SI-NEXT: .LBB14_4:
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SI-NEXT: s_branch .LBB14_2
;
; VI-LABEL: xor_cf:
; VI: ; %bb.0: ; %entry
@@ -582,7 +579,7 @@ define amdgpu_kernel void @xor_cf(ptr addrspace(1) %out, ptr addrspace(1) %in, i
; VI-NEXT: s_mov_b64 s[8:9], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u64 s[4:5], 0
-; VI-NEXT: s_cbranch_scc0 .LBB14_4
+; VI-NEXT: s_cbranch_scc0 .LBB14_2
; VI-NEXT: ; %bb.1: ; %else
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
@@ -600,9 +597,6 @@ define amdgpu_kernel void @xor_cf(ptr addrspace(1) %out, ptr addrspace(1) %in, i
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: s_endpgm
-; VI-NEXT: .LBB14_4:
-; VI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; VI-NEXT: s_branch .LBB14_2
entry:
%0 = icmp eq i64 %a, 0
br i1 %0, label %if, label %else