[llvm-branch-commits] [llvm] [AMDGPU] DPP wave reduction for double types - 2 (PR #189391)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Mon Mar 30 07:10:15 PDT 2026
https://github.com/easyonaadit created https://github.com/llvm/llvm-project/pull/189391
Supported Ops: `fadd` and `fsub`
From 782be01e70b255e75208bf4557dddf415628b72b Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Mon, 30 Mar 2026 19:39:15 +0530
Subject: [PATCH] [AMDGPU] DPP wave reduction for double types - 2
Supported Ops: `fadd` and `fsub`
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 22 +-
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.fadd.ll | 1138 ++++++++++++++--
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.fsub.ll | 1160 +++++++++++++++--
3 files changed, 2050 insertions(+), 270 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index dcc342638c5c1..94b27973b83f0 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5745,6 +5745,8 @@ getDPPOpcForWaveReduction(unsigned Opc, const GCNSubtarget &ST) {
case AMDGPU::V_MIN_F64_e64:
case AMDGPU::V_MAX_NUM_F64_e64:
case AMDGPU::V_MAX_F64_e64:
+ case AMDGPU::V_ADD_F64_pseudo_e64:
+ case AMDGPU::V_ADD_F64_e64:
DPPOpc = AMDGPU::V_MOV_B64_DPP_PSEUDO;
break;
default:
@@ -6579,17 +6581,17 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
}
FinalDPPResult = RowBcast31;
}
- if (Opc == AMDGPU::V_SUB_F32_e64) {
- Register NegatedValVGPR =
- MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
- BuildMI(*CurrBB, MI, DL, TII->get(AMDGPU::V_SUB_F32_e64),
+ if (Opc == AMDGPU::V_SUB_F32_e64 ||
+ MI.getOpcode() == AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64) {
+ Register NegatedValVGPR = MRI.createVirtualRegister(SrcRegClass);
+ BuildMI(*CurrBB, MI, DL, TII->get(Opc),
NegatedValVGPR)
- .addImm(SISrcMods::NONE) // src0 mods
- .addReg(IdentityVGPR) // src0
- .addImm(SISrcMods::NONE) // src1 mods
- .addReg(IsWave32 ? RowBcast15 : RowBcast31) // src1
- .addImm(SISrcMods::NONE) // clamp
- .addImm(SISrcMods::NONE); // omod
+ .addImm(SISrcMods::NONE) // src0 mods
+ .addReg(IdentityVGPR) // src0
+ .addImm(is32BitOpc ? SISrcMods::NONE : SISrcMods::NEG) // src1 mods
+ .addReg(IsWave32 ? RowBcast15 : RowBcast31) // src1
+ .addImm(SISrcMods::NONE) // clamp
+ .addImm(SISrcMods::NONE); // omod
FinalDPPResult = NegatedValVGPR;
}
// The final reduced value is in the last lane.
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fadd.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fadd.ll
index 8ece344b61234..1906687709c1e 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fadd.ll
@@ -950,6 +950,884 @@ entry:
ret void
}
+define void @divergent_value_double_dpp(ptr addrspace(1) %out, double %in) {
+; GFX8DAGISEL-LABEL: divergent_value_double_dpp:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX8DAGISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX8DAGISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX8DAGISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX8DAGISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX8DAGISEL-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX8DAGISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX8DAGISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX8DAGISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX8DAGISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX8DAGISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[4:5], -1
+; GFX8DAGISEL-NEXT: v_bfrev_b32_e32 v4, 1
+; GFX8DAGISEL-NEXT: v_cndmask_b32_e64 v5, 0, v2, s[4:5]
+; GFX8DAGISEL-NEXT: v_cndmask_b32_e64 v6, v4, v3, s[4:5]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v8, v6
+; GFX8DAGISEL-NEXT: s_nop 0
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v8, v8 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_add_f64 v[4:5], v[5:6], v[7:8]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8DAGISEL-NEXT: s_nop 0
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8DAGISEL-NEXT: s_nop 0
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8DAGISEL-NEXT: s_nop 0
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8DAGISEL-NEXT: s_nop 0
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_bcast:15 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_bcast:15 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8DAGISEL-NEXT: s_nop 0
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_bcast:31 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_bcast:31 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8DAGISEL-NEXT: v_readlane_b32 s6, v4, 63
+; GFX8DAGISEL-NEXT: v_readlane_b32 s7, v5, 63
+; GFX8DAGISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s6
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s7
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX8DAGISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX8DAGISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX8DAGISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX8DAGISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX8DAGISEL-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX8DAGISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX8DAGISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX8DAGISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX8DAGISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX8DAGISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8GISEL-LABEL: divergent_value_double_dpp:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX8GISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX8GISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX8GISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX8GISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX8GISEL-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX8GISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX8GISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX8GISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX8GISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX8GISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX8GISEL-NEXT: s_or_saveexec_b64 s[4:5], -1
+; GFX8GISEL-NEXT: v_bfrev_b32_e32 v4, 1
+; GFX8GISEL-NEXT: v_cndmask_b32_e64 v5, 0, v2, s[4:5]
+; GFX8GISEL-NEXT: v_cndmask_b32_e64 v6, v4, v3, s[4:5]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8GISEL-NEXT: v_mov_b32_e32 v8, v6
+; GFX8GISEL-NEXT: s_nop 0
+; GFX8GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_mov_b32_dpp v8, v8 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_add_f64 v[4:5], v[5:6], v[7:8]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8GISEL-NEXT: s_nop 0
+; GFX8GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8GISEL-NEXT: s_nop 0
+; GFX8GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8GISEL-NEXT: s_nop 0
+; GFX8GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8GISEL-NEXT: s_nop 0
+; GFX8GISEL-NEXT: v_mov_b32_dpp v6, v6 row_bcast:15 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_mov_b32_dpp v7, v7 row_bcast:15 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8GISEL-NEXT: s_nop 0
+; GFX8GISEL-NEXT: v_mov_b32_dpp v6, v6 row_bcast:31 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_mov_b32_dpp v7, v7 row_bcast:31 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8GISEL-NEXT: v_readlane_b32 s6, v4, 63
+; GFX8GISEL-NEXT: v_readlane_b32 s7, v5, 63
+; GFX8GISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8GISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX8GISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX8GISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX8GISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX8GISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX8GISEL-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX8GISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX8GISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX8GISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX8GISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX8GISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9DAGISEL-LABEL: divergent_value_double_dpp:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9DAGISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9DAGISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9DAGISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9DAGISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9DAGISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9DAGISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[4:5], -1
+; GFX9DAGISEL-NEXT: v_bfrev_b32_e32 v4, 1
+; GFX9DAGISEL-NEXT: v_cndmask_b32_e64 v5, 0, v2, s[4:5]
+; GFX9DAGISEL-NEXT: v_cndmask_b32_e64 v6, v4, v3, s[4:5]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v8, v6
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v8, v8 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_add_f64 v[4:5], v[5:6], v[7:8]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_bcast:15 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_bcast:15 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_bcast:31 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_bcast:31 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9DAGISEL-NEXT: v_readlane_b32 s6, v4, 63
+; GFX9DAGISEL-NEXT: v_readlane_b32 s7, v5, 63
+; GFX9DAGISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, s6
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v3, s7
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9DAGISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9DAGISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9DAGISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9DAGISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9DAGISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9DAGISEL-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX9DAGISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9DAGISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9DAGISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9DAGISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9DAGISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9GISEL-LABEL: divergent_value_double_dpp:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9GISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9GISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9GISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9GISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9GISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9GISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9GISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9GISEL-NEXT: s_or_saveexec_b64 s[4:5], -1
+; GFX9GISEL-NEXT: v_bfrev_b32_e32 v4, 1
+; GFX9GISEL-NEXT: v_cndmask_b32_e64 v5, 0, v2, s[4:5]
+; GFX9GISEL-NEXT: v_cndmask_b32_e64 v6, v4, v3, s[4:5]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9GISEL-NEXT: v_mov_b32_e32 v8, v6
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_mov_b32_dpp v8, v8 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_add_f64 v[4:5], v[5:6], v[7:8]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: v_mov_b32_dpp v6, v6 row_bcast:15 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_mov_b32_dpp v7, v7 row_bcast:15 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: v_mov_b32_dpp v6, v6 row_bcast:31 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_mov_b32_dpp v7, v7 row_bcast:31 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9GISEL-NEXT: v_readlane_b32 s6, v4, 63
+; GFX9GISEL-NEXT: v_readlane_b32 s7, v5, 63
+; GFX9GISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GFX9GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GFX9GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9GISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9GISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9GISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9GISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9GISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9GISEL-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX9GISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9GISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9GISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9GISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9GISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1064DAGISEL-LABEL: divergent_value_double_dpp:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX1064DAGISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX1064DAGISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX1064DAGISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX1064DAGISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX1064DAGISEL-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX1064DAGISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX1064DAGISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX1064DAGISEL-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX1064DAGISEL-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1064DAGISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[4:5], -1
+; GFX1064DAGISEL-NEXT: v_cndmask_b32_e64 v4, 0, v2, s[4:5]
+; GFX1064DAGISEL-NEXT: v_cndmask_b32_e64 v5, 0x80000000, v3, s[4:5]
+; GFX1064DAGISEL-NEXT: v_mbcnt_lo_u32_b32 v8, -1, 0
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1064DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1064DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1064DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1064DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1064DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1064DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1064DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1064DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1064DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1064DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1064DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1064DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064DAGISEL-NEXT: ds_swizzle_b32 v6, v4 offset:swizzle(BROADCAST,32,15)
+; GFX1064DAGISEL-NEXT: ds_swizzle_b32 v7, v5 offset:swizzle(BROADCAST,32,15)
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064DAGISEL-NEXT: v_mbcnt_hi_u32_b32 v6, -1, v8
+; GFX1064DAGISEL-NEXT: v_add_nc_u32_e32 v6, 32, v6
+; GFX1064DAGISEL-NEXT: v_mul_lo_u32 v6, 4, v6
+; GFX1064DAGISEL-NEXT: ds_permute_b32 v7, v6, v4
+; GFX1064DAGISEL-NEXT: ds_permute_b32 v8, v6, v5
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[7:8]
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s6, v4, 63
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s7, v5, 63
+; GFX1064DAGISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, s6
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v3, s7
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064DAGISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX1064DAGISEL-NEXT: s_clause 0x7 ; 32-byte Folded Reload
+; GFX1064DAGISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32
+; GFX1064DAGISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:4
+; GFX1064DAGISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8
+; GFX1064DAGISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:12
+; GFX1064DAGISEL-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16
+; GFX1064DAGISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:20
+; GFX1064DAGISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:24
+; GFX1064DAGISEL-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:28
+; GFX1064DAGISEL-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1064DAGISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX1064DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1064GISEL-LABEL: divergent_value_double_dpp:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX1064GISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX1064GISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX1064GISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX1064GISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX1064GISEL-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX1064GISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX1064GISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX1064GISEL-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX1064GISEL-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1064GISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX1064GISEL-NEXT: s_or_saveexec_b64 s[4:5], -1
+; GFX1064GISEL-NEXT: v_cndmask_b32_e64 v4, 0, v2, s[4:5]
+; GFX1064GISEL-NEXT: v_cndmask_b32_e64 v5, 0x80000000, v3, s[4:5]
+; GFX1064GISEL-NEXT: v_mbcnt_lo_u32_b32 v8, -1, 0
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1064GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1064GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1064GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1064GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1064GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1064GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1064GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1064GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1064GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1064GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1064GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1064GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064GISEL-NEXT: ds_swizzle_b32 v6, v4 offset:swizzle(BROADCAST,32,15)
+; GFX1064GISEL-NEXT: ds_swizzle_b32 v7, v5 offset:swizzle(BROADCAST,32,15)
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064GISEL-NEXT: v_mbcnt_hi_u32_b32 v6, -1, v8
+; GFX1064GISEL-NEXT: v_add_nc_u32_e32 v6, 32, v6
+; GFX1064GISEL-NEXT: v_mul_lo_u32 v6, 4, v6
+; GFX1064GISEL-NEXT: ds_permute_b32 v7, v6, v4
+; GFX1064GISEL-NEXT: ds_permute_b32 v8, v6, v5
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[7:8]
+; GFX1064GISEL-NEXT: v_readlane_b32 s6, v4, 63
+; GFX1064GISEL-NEXT: v_readlane_b32 s7, v5, 63
+; GFX1064GISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GFX1064GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064GISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX1064GISEL-NEXT: s_clause 0x7 ; 32-byte Folded Reload
+; GFX1064GISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32
+; GFX1064GISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:4
+; GFX1064GISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8
+; GFX1064GISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:12
+; GFX1064GISEL-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16
+; GFX1064GISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:20
+; GFX1064GISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:24
+; GFX1064GISEL-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:28
+; GFX1064GISEL-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1064GISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX1064GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032DAGISEL-LABEL: divergent_value_double_dpp:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_xor_saveexec_b32 s4, -1
+; GFX1032DAGISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX1032DAGISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX1032DAGISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX1032DAGISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX1032DAGISEL-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1032DAGISEL-NEXT: s_mov_b32 exec_lo, s4
+; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s6, -1
+; GFX1032DAGISEL-NEXT: v_cndmask_b32_e64 v4, 0, v2, s6
+; GFX1032DAGISEL-NEXT: v_cndmask_b32_e64 v5, 0x80000000, v3, s6
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1032DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1032DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1032DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1032DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1032DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1032DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1032DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1032DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1032DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1032DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1032DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1032DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032DAGISEL-NEXT: ds_swizzle_b32 v6, v4 offset:swizzle(BROADCAST,32,15)
+; GFX1032DAGISEL-NEXT: ds_swizzle_b32 v7, v5 offset:swizzle(BROADCAST,32,15)
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s4, v4, 31
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s5, v5, 31
+; GFX1032DAGISEL-NEXT: s_mov_b32 exec_lo, s6
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032DAGISEL-NEXT: s_xor_saveexec_b32 s4, -1
+; GFX1032DAGISEL-NEXT: s_clause 0x3 ; 16-byte Folded Reload
+; GFX1032DAGISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32
+; GFX1032DAGISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:4
+; GFX1032DAGISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8
+; GFX1032DAGISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:12
+; GFX1032DAGISEL-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1032DAGISEL-NEXT: s_mov_b32 exec_lo, s4
+; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX1032DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032GISEL-LABEL: divergent_value_double_dpp:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_xor_saveexec_b32 s4, -1
+; GFX1032GISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX1032GISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX1032GISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX1032GISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX1032GISEL-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1032GISEL-NEXT: s_mov_b32 exec_lo, s4
+; GFX1032GISEL-NEXT: s_or_saveexec_b32 s6, -1
+; GFX1032GISEL-NEXT: v_cndmask_b32_e64 v4, 0, v2, s6
+; GFX1032GISEL-NEXT: v_cndmask_b32_e64 v5, 0x80000000, v3, s6
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1032GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1032GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1032GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1032GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1032GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1032GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1032GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1032GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1032GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1032GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1032GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1032GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032GISEL-NEXT: ds_swizzle_b32 v6, v4 offset:swizzle(BROADCAST,32,15)
+; GFX1032GISEL-NEXT: ds_swizzle_b32 v7, v5 offset:swizzle(BROADCAST,32,15)
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032GISEL-NEXT: v_readlane_b32 s4, v4, 31
+; GFX1032GISEL-NEXT: v_readlane_b32 s5, v5, 31
+; GFX1032GISEL-NEXT: s_mov_b32 exec_lo, s6
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032GISEL-NEXT: s_xor_saveexec_b32 s4, -1
+; GFX1032GISEL-NEXT: s_clause 0x3 ; 16-byte Folded Reload
+; GFX1032GISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32
+; GFX1032GISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:4
+; GFX1032GISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8
+; GFX1032GISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:12
+; GFX1032GISEL-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1032GISEL-NEXT: s_mov_b32 exec_lo, s4
+; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX1032GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1164DAGISEL-LABEL: divergent_value_double_dpp:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_xor_saveexec_b64 s[0:1], -1
+; GFX1164DAGISEL-NEXT: s_clause 0x3 ; 28-byte Folded Spill
+; GFX1164DAGISEL-NEXT: scratch_store_b64 off, v[4:5], s32
+; GFX1164DAGISEL-NEXT: scratch_store_b64 off, v[6:7], s32 offset:8
+; GFX1164DAGISEL-NEXT: scratch_store_b32 off, v6, s32 offset:16
+; GFX1164DAGISEL-NEXT: scratch_store_b64 off, v[7:8], s32 offset:20
+; GFX1164DAGISEL-NEXT: s_mov_b64 exec, s[0:1]
+; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], -1
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT: v_cndmask_b32_e64 v4, 0, v2, s[0:1]
+; GFX1164DAGISEL-NEXT: v_cndmask_b32_e64 v5, 0x80000000, v3, s[0:1]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1164DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1164DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1164DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1164DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1164DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1164DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164DAGISEL-NEXT: ds_swizzle_b32 v6, v4 offset:swizzle(BROADCAST,32,15)
+; GFX1164DAGISEL-NEXT: ds_swizzle_b32 v7, v5 offset:swizzle(BROADCAST,32,15)
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164DAGISEL-NEXT: v_mbcnt_lo_u32_b32 v6, -1, 0
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: v_mbcnt_hi_u32_b32 v6, -1, v6
+; GFX1164DAGISEL-NEXT: v_add_nc_u32_e32 v6, 32, v6
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: v_mul_lo_u32 v6, 4, v6
+; GFX1164DAGISEL-NEXT: ds_permute_b32 v7, v6, v4
+; GFX1164DAGISEL-NEXT: ds_permute_b32 v8, v6, v5
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[7:8]
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s2, v4, 63
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s3, v5, 63
+; GFX1164DAGISEL-NEXT: s_mov_b64 exec, s[0:1]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164DAGISEL-NEXT: s_xor_saveexec_b64 s[0:1], -1
+; GFX1164DAGISEL-NEXT: s_waitcnt_depctr depctr_sa_sdst(0)
+; GFX1164DAGISEL-NEXT: s_clause 0x3 ; 28-byte Folded Reload
+; GFX1164DAGISEL-NEXT: scratch_load_b64 v[4:5], off, s32
+; GFX1164DAGISEL-NEXT: scratch_load_b64 v[6:7], off, s32 offset:8
+; GFX1164DAGISEL-NEXT: scratch_load_b32 v6, off, s32 offset:16
+; GFX1164DAGISEL-NEXT: scratch_load_b64 v[7:8], off, s32 offset:20
+; GFX1164DAGISEL-NEXT: s_mov_b64 exec, s[0:1]
+; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX1164DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1164GISEL-LABEL: divergent_value_double_dpp:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_xor_saveexec_b64 s[0:1], -1
+; GFX1164GISEL-NEXT: s_clause 0x3 ; 28-byte Folded Spill
+; GFX1164GISEL-NEXT: scratch_store_b64 off, v[4:5], s32
+; GFX1164GISEL-NEXT: scratch_store_b64 off, v[6:7], s32 offset:8
+; GFX1164GISEL-NEXT: scratch_store_b32 off, v6, s32 offset:16
+; GFX1164GISEL-NEXT: scratch_store_b64 off, v[7:8], s32 offset:20
+; GFX1164GISEL-NEXT: s_mov_b64 exec, s[0:1]
+; GFX1164GISEL-NEXT: s_or_saveexec_b64 s[0:1], -1
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT: v_cndmask_b32_e64 v4, 0, v2, s[0:1]
+; GFX1164GISEL-NEXT: v_cndmask_b32_e64 v5, 0x80000000, v3, s[0:1]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1164GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1164GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1164GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1164GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1164GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1164GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164GISEL-NEXT: ds_swizzle_b32 v6, v4 offset:swizzle(BROADCAST,32,15)
+; GFX1164GISEL-NEXT: ds_swizzle_b32 v7, v5 offset:swizzle(BROADCAST,32,15)
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164GISEL-NEXT: v_mbcnt_lo_u32_b32 v6, -1, 0
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_mbcnt_hi_u32_b32 v6, -1, v6
+; GFX1164GISEL-NEXT: v_add_nc_u32_e32 v6, 32, v6
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_mul_lo_u32 v6, 4, v6
+; GFX1164GISEL-NEXT: ds_permute_b32 v7, v6, v4
+; GFX1164GISEL-NEXT: ds_permute_b32 v8, v6, v5
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[7:8]
+; GFX1164GISEL-NEXT: v_readlane_b32 s2, v4, 63
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164GISEL-NEXT: v_readlane_b32 s3, v5, 63
+; GFX1164GISEL-NEXT: s_mov_b64 exec, s[0:1]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164GISEL-NEXT: s_xor_saveexec_b64 s[0:1], -1
+; GFX1164GISEL-NEXT: s_waitcnt_depctr depctr_sa_sdst(0)
+; GFX1164GISEL-NEXT: s_clause 0x3 ; 28-byte Folded Reload
+; GFX1164GISEL-NEXT: scratch_load_b64 v[4:5], off, s32
+; GFX1164GISEL-NEXT: scratch_load_b64 v[6:7], off, s32 offset:8
+; GFX1164GISEL-NEXT: scratch_load_b32 v6, off, s32 offset:16
+; GFX1164GISEL-NEXT: scratch_load_b64 v[7:8], off, s32 offset:20
+; GFX1164GISEL-NEXT: s_mov_b64 exec, s[0:1]
+; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX1164GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1132DAGISEL-LABEL: divergent_value_double_dpp:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_xor_saveexec_b32 s0, -1
+; GFX1132DAGISEL-NEXT: s_clause 0x1 ; 16-byte Folded Spill
+; GFX1132DAGISEL-NEXT: scratch_store_b64 off, v[4:5], s32
+; GFX1132DAGISEL-NEXT: scratch_store_b64 off, v[6:7], s32 offset:8
+; GFX1132DAGISEL-NEXT: s_mov_b32 exec_lo, s0
+; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s2, -1
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: v_cndmask_b32_e64 v4, 0, v2, s2
+; GFX1132DAGISEL-NEXT: v_cndmask_b32_e64 v5, 0x80000000, v3, s2
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1132DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1132DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1132DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1132DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132DAGISEL-NEXT: ds_swizzle_b32 v6, v4 offset:swizzle(BROADCAST,32,15)
+; GFX1132DAGISEL-NEXT: ds_swizzle_b32 v7, v5 offset:swizzle(BROADCAST,32,15)
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s0, v4, 31
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s1, v5, 31
+; GFX1132DAGISEL-NEXT: s_mov_b32 exec_lo, s2
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132DAGISEL-NEXT: s_xor_saveexec_b32 s0, -1
+; GFX1132DAGISEL-NEXT: s_clause 0x1 ; 16-byte Folded Reload
+; GFX1132DAGISEL-NEXT: scratch_load_b64 v[4:5], off, s32
+; GFX1132DAGISEL-NEXT: scratch_load_b64 v[6:7], off, s32 offset:8
+; GFX1132DAGISEL-NEXT: s_mov_b32 exec_lo, s0
+; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX1132DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1132GISEL-LABEL: divergent_value_double_dpp:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_xor_saveexec_b32 s0, -1
+; GFX1132GISEL-NEXT: s_clause 0x1 ; 16-byte Folded Spill
+; GFX1132GISEL-NEXT: scratch_store_b64 off, v[4:5], s32
+; GFX1132GISEL-NEXT: scratch_store_b64 off, v[6:7], s32 offset:8
+; GFX1132GISEL-NEXT: s_mov_b32 exec_lo, s0
+; GFX1132GISEL-NEXT: s_or_saveexec_b32 s2, -1
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_cndmask_b32_e64 v4, 0, v2, s2
+; GFX1132GISEL-NEXT: v_cndmask_b32_e64 v5, 0x80000000, v3, s2
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1132GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1132GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1132GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1132GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132GISEL-NEXT: ds_swizzle_b32 v6, v4 offset:swizzle(BROADCAST,32,15)
+; GFX1132GISEL-NEXT: ds_swizzle_b32 v7, v5 offset:swizzle(BROADCAST,32,15)
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132GISEL-NEXT: v_readlane_b32 s0, v4, 31
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132GISEL-NEXT: v_readlane_b32 s1, v5, 31
+; GFX1132GISEL-NEXT: s_mov_b32 exec_lo, s2
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132GISEL-NEXT: s_xor_saveexec_b32 s0, -1
+; GFX1132GISEL-NEXT: s_clause 0x1 ; 16-byte Folded Reload
+; GFX1132GISEL-NEXT: scratch_load_b64 v[4:5], off, s32
+; GFX1132GISEL-NEXT: scratch_load_b64 v[6:7], off, s32 offset:8
+; GFX1132GISEL-NEXT: s_mov_b32 exec_lo, s0
+; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX1132GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12DAGISEL-LABEL: divergent_value_double_dpp:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_expcnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_xor_saveexec_b32 s0, -1
+; GFX12DAGISEL-NEXT: s_clause 0x1 ; 16-byte Folded Spill
+; GFX12DAGISEL-NEXT: scratch_store_b64 off, v[4:5], s32
+; GFX12DAGISEL-NEXT: scratch_store_b64 off, v[6:7], s32 offset:8
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT: s_mov_b32 exec_lo, s0
+; GFX12DAGISEL-NEXT: s_or_saveexec_b32 s2, -1
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT: v_cndmask_b32_e64 v4, 0, v2, s2
+; GFX12DAGISEL-NEXT: v_cndmask_b32_e64 v5, 0x80000000, v3, s2
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX12DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX12DAGISEL-NEXT: v_add_f64_e32 v[4:5], v[4:5], v[6:7]
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX12DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX12DAGISEL-NEXT: v_add_f64_e32 v[4:5], v[4:5], v[6:7]
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX12DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX12DAGISEL-NEXT: v_add_f64_e32 v[4:5], v[4:5], v[6:7]
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX12DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX12DAGISEL-NEXT: v_add_f64_e32 v[4:5], v[4:5], v[6:7]
+; GFX12DAGISEL-NEXT: ds_swizzle_b32 v6, v4 offset:swizzle(BROADCAST,32,15)
+; GFX12DAGISEL-NEXT: ds_swizzle_b32 v7, v5 offset:swizzle(BROADCAST,32,15)
+; GFX12DAGISEL-NEXT: s_wait_dscnt 0x0
+; GFX12DAGISEL-NEXT: v_add_f64_e32 v[4:5], v[4:5], v[6:7]
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12DAGISEL-NEXT: v_readlane_b32 s0, v4, 31
+; GFX12DAGISEL-NEXT: v_readlane_b32 s1, v5, 31
+; GFX12DAGISEL-NEXT: s_mov_b32 exec_lo, s2
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX12DAGISEL-NEXT: s_xor_saveexec_b32 s0, -1
+; GFX12DAGISEL-NEXT: s_clause 0x1 ; 16-byte Folded Reload
+; GFX12DAGISEL-NEXT: scratch_load_b64 v[4:5], off, s32
+; GFX12DAGISEL-NEXT: scratch_load_b64 v[6:7], off, s32 offset:8
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT: s_mov_b32 exec_lo, s0
+; GFX12DAGISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12DAGISEL-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %result = call double @llvm.amdgcn.wave.reduce.fadd(double %in, i32 2)
+ store double %result, ptr addrspace(1) %out
+ ret void
+}
+
define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in, float %in2) {
; GFX8DAGISEL-LABEL: divergent_cfg_float:
; GFX8DAGISEL: ; %bb.0: ; %entry
@@ -958,7 +1836,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX8DAGISEL-NEXT: ; implicit-def: $sgpr6
; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX8DAGISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX8DAGISEL-NEXT: ; %bb.1: ; %else
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -966,11 +1844,11 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8DAGISEL-NEXT: v_mul_f32_e32 v0, s0, v0
; GFX8DAGISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX8DAGISEL-NEXT: .LBB3_2: ; %Flow
+; GFX8DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[2:3]
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s6
; GFX8DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
-; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX8DAGISEL-NEXT: ; %bb.3: ; %if
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -979,7 +1857,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX8DAGISEL-NEXT: v_mul_f32_e32 v0, s1, v0
; GFX8DAGISEL-NEXT: v_readfirstlane_b32 s0, v0
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
-; GFX8DAGISEL-NEXT: .LBB3_4: ; %endif
+; GFX8DAGISEL-NEXT: .LBB4_4: ; %endif
; GFX8DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
@@ -996,7 +1874,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX8GISEL-NEXT: ; implicit-def: $sgpr6
; GFX8GISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX8GISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX8GISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX8GISEL-NEXT: ; %bb.1: ; %else
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -1004,9 +1882,9 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8GISEL-NEXT: v_mul_f32_e32 v0, s0, v0
; GFX8GISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX8GISEL-NEXT: .LBB3_2: ; %Flow
+; GFX8GISEL-NEXT: .LBB4_2: ; %Flow
; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[2:3]
-; GFX8GISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX8GISEL-NEXT: ; %bb.3: ; %if
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1014,7 +1892,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX8GISEL-NEXT: v_cvt_f32_i32_e32 v0, s0
; GFX8GISEL-NEXT: v_mul_f32_e32 v0, s1, v0
; GFX8GISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX8GISEL-NEXT: .LBB3_4: ; %endif
+; GFX8GISEL-NEXT: .LBB4_4: ; %endif
; GFX8GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
@@ -1032,7 +1910,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX9DAGISEL-NEXT: ; implicit-def: $sgpr6
; GFX9DAGISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9DAGISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX9DAGISEL-NEXT: ; %bb.1: ; %else
; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -1040,11 +1918,11 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9DAGISEL-NEXT: v_mul_f32_e32 v0, s0, v0
; GFX9DAGISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9DAGISEL-NEXT: .LBB3_2: ; %Flow
+; GFX9DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[2:3]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s6
; GFX9DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
-; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX9DAGISEL-NEXT: ; %bb.3: ; %if
; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1053,7 +1931,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX9DAGISEL-NEXT: v_mul_f32_e32 v0, s1, v0
; GFX9DAGISEL-NEXT: v_readfirstlane_b32 s0, v0
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s0
-; GFX9DAGISEL-NEXT: .LBB3_4: ; %endif
+; GFX9DAGISEL-NEXT: .LBB4_4: ; %endif
; GFX9DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
@@ -1069,7 +1947,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX9GISEL-NEXT: ; implicit-def: $sgpr6
; GFX9GISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9GISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX9GISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX9GISEL-NEXT: ; %bb.1: ; %else
; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -1077,9 +1955,9 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9GISEL-NEXT: v_mul_f32_e32 v0, s0, v0
; GFX9GISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9GISEL-NEXT: .LBB3_2: ; %Flow
+; GFX9GISEL-NEXT: .LBB4_2: ; %Flow
; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[2:3]
-; GFX9GISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX9GISEL-NEXT: ; %bb.3: ; %if
; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1087,7 +1965,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX9GISEL-NEXT: v_cvt_f32_i32_e32 v0, s0
; GFX9GISEL-NEXT: v_mul_f32_e32 v0, s1, v0
; GFX9GISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9GISEL-NEXT: .LBB3_4: ; %endif
+; GFX9GISEL-NEXT: .LBB4_4: ; %endif
; GFX9GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
@@ -1104,7 +1982,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1064DAGISEL-NEXT: ; implicit-def: $sgpr6
; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064DAGISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1064DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -1112,11 +1990,11 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064DAGISEL-NEXT: v_mul_f32_e32 v0, s0, v0
; GFX1064DAGISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX1064DAGISEL-NEXT: .LBB3_2: ; %Flow
+; GFX1064DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[2:3]
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s6
; GFX1064DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
-; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX1064DAGISEL-NEXT: ; %bb.3: ; %if
; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1125,7 +2003,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1064DAGISEL-NEXT: v_mul_f32_e32 v0, s1, v0
; GFX1064DAGISEL-NEXT: v_readfirstlane_b32 s0, v0
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s0
-; GFX1064DAGISEL-NEXT: .LBB3_4: ; %endif
+; GFX1064DAGISEL-NEXT: .LBB4_4: ; %endif
; GFX1064DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
@@ -1141,7 +2019,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6
; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064GISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX1064GISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1064GISEL-NEXT: ; %bb.1: ; %else
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -1149,9 +2027,9 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064GISEL-NEXT: v_mul_f32_e32 v0, s0, v0
; GFX1064GISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX1064GISEL-NEXT: .LBB3_2: ; %Flow
+; GFX1064GISEL-NEXT: .LBB4_2: ; %Flow
; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[2:3]
-; GFX1064GISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX1064GISEL-NEXT: ; %bb.3: ; %if
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1159,7 +2037,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1064GISEL-NEXT: v_cvt_f32_i32_e32 v0, s0
; GFX1064GISEL-NEXT: v_mul_f32_e32 v0, s1, v0
; GFX1064GISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX1064GISEL-NEXT: .LBB3_4: ; %endif
+; GFX1064GISEL-NEXT: .LBB4_4: ; %endif
; GFX1064GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
@@ -1176,7 +2054,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1032DAGISEL-NEXT: ; implicit-def: $sgpr3
; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s2, vcc_lo
; GFX1032DAGISEL-NEXT: s_xor_b32 s2, exec_lo, s2
-; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1032DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1032DAGISEL-NEXT: s_mov_b32 s3, exec_lo
; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s3, s3
@@ -1184,12 +2062,12 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032DAGISEL-NEXT: v_mul_f32_e32 v0, s0, v0
; GFX1032DAGISEL-NEXT: v_readfirstlane_b32 s3, v0
-; GFX1032DAGISEL-NEXT: .LBB3_2: ; %Flow
+; GFX1032DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s0, s2
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s3
; GFX1032DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX1032DAGISEL-NEXT: ; %bb.3: ; %if
; GFX1032DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
@@ -1197,7 +2075,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1032DAGISEL-NEXT: v_mul_f32_e32 v0, s1, v0
; GFX1032DAGISEL-NEXT: v_readfirstlane_b32 s1, v0
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s1
-; GFX1032DAGISEL-NEXT: .LBB3_4: ; %endif
+; GFX1032DAGISEL-NEXT: .LBB4_4: ; %endif
; GFX1032DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, 0
@@ -1212,7 +2090,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1032GISEL-NEXT: ; implicit-def: $sgpr2
; GFX1032GISEL-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX1032GISEL-NEXT: s_xor_b32 s3, exec_lo, s3
-; GFX1032GISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1032GISEL-NEXT: ; %bb.1: ; %else
; GFX1032GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
@@ -1220,17 +2098,17 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032GISEL-NEXT: v_mul_f32_e32 v0, s0, v0
; GFX1032GISEL-NEXT: v_readfirstlane_b32 s2, v0
-; GFX1032GISEL-NEXT: .LBB3_2: ; %Flow
+; GFX1032GISEL-NEXT: .LBB4_2: ; %Flow
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s0, s3
-; GFX1032GISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX1032GISEL-NEXT: ; %bb.3: ; %if
; GFX1032GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
; GFX1032GISEL-NEXT: v_cvt_f32_i32_e32 v0, s2
; GFX1032GISEL-NEXT: v_mul_f32_e32 v0, s1, v0
; GFX1032GISEL-NEXT: v_readfirstlane_b32 s2, v0
-; GFX1032GISEL-NEXT: .LBB3_4: ; %endif
+; GFX1032GISEL-NEXT: .LBB4_4: ; %endif
; GFX1032GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1032GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s2
@@ -1248,7 +2126,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX1164DAGISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1164DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -1258,12 +2136,12 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164DAGISEL-NEXT: v_mul_f32_e32 v0, s0, v0
; GFX1164DAGISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX1164DAGISEL-NEXT: .LBB3_2: ; %Flow
+; GFX1164DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[2:3]
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s6
; GFX1164DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
-; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX1164DAGISEL-NEXT: ; %bb.3: ; %if
; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1274,7 +2152,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164DAGISEL-NEXT: v_readfirstlane_b32 s0, v0
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s0
-; GFX1164DAGISEL-NEXT: .LBB3_4: ; %endif
+; GFX1164DAGISEL-NEXT: .LBB4_4: ; %endif
; GFX1164DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
@@ -1292,7 +2170,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
; GFX1164GISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX1164GISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1164GISEL-NEXT: ; %bb.1: ; %else
; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -1302,9 +2180,9 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164GISEL-NEXT: v_mul_f32_e32 v0, s0, v0
; GFX1164GISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX1164GISEL-NEXT: .LBB3_2: ; %Flow
+; GFX1164GISEL-NEXT: .LBB4_2: ; %Flow
; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[2:3], s[2:3]
-; GFX1164GISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX1164GISEL-NEXT: ; %bb.3: ; %if
; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1314,7 +2192,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1164GISEL-NEXT: v_mul_f32_e32 v0, s1, v0
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164GISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX1164GISEL-NEXT: .LBB3_4: ; %endif
+; GFX1164GISEL-NEXT: .LBB4_4: ; %endif
; GFX1164GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
@@ -1334,7 +2212,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX1132DAGISEL-NEXT: s_xor_b32 s2, exec_lo, s2
-; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1132DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1132DAGISEL-NEXT: s_mov_b32 s3, exec_lo
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -1344,13 +2222,13 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_mul_f32_e32 v0, s0, v0
; GFX1132DAGISEL-NEXT: v_readfirstlane_b32 s3, v0
-; GFX1132DAGISEL-NEXT: .LBB3_2: ; %Flow
+; GFX1132DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s0, s2
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, s3
; GFX1132DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX1132DAGISEL-NEXT: ; %bb.3: ; %if
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -1361,7 +2239,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1132DAGISEL-NEXT: v_readfirstlane_b32 s1, v0
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, s1
-; GFX1132DAGISEL-NEXT: .LBB3_4: ; %endif
+; GFX1132DAGISEL-NEXT: .LBB4_4: ; %endif
; GFX1132DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v1, 0
@@ -1378,7 +2256,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
; GFX1132GISEL-NEXT: s_xor_b32 s3, exec_lo, s3
-; GFX1132GISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1132GISEL-NEXT: ; %bb.1: ; %else
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -1388,10 +2266,10 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1132GISEL-NEXT: v_mul_f32_e32 v0, s0, v0
; GFX1132GISEL-NEXT: v_readfirstlane_b32 s2, v0
-; GFX1132GISEL-NEXT: .LBB3_2: ; %Flow
+; GFX1132GISEL-NEXT: .LBB4_2: ; %Flow
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s0, s3
-; GFX1132GISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX1132GISEL-NEXT: ; %bb.3: ; %if
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -1400,7 +2278,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1132GISEL-NEXT: v_mul_f32_e32 v0, s1, v0
; GFX1132GISEL-NEXT: v_readfirstlane_b32 s2, v0
-; GFX1132GISEL-NEXT: .LBB3_4: ; %endif
+; GFX1132GISEL-NEXT: .LBB4_4: ; %endif
; GFX1132GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -1418,7 +2296,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX12DAGISEL-NEXT: s_xor_b32 s2, exec_lo, s2
-; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX12DAGISEL-NEXT: ; %bb.1: ; %else
; GFX12DAGISEL-NEXT: s_mov_b32 s3, exec_lo
; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -1428,14 +2306,14 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12DAGISEL-NEXT: v_mul_f32_e32 v0, s0, v0
; GFX12DAGISEL-NEXT: v_readfirstlane_b32 s3, v0
-; GFX12DAGISEL-NEXT: .LBB3_2: ; %Flow
+; GFX12DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
; GFX12DAGISEL-NEXT: s_or_saveexec_b32 s0, s2
; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12DAGISEL-NEXT: v_mov_b32_e32 v0, s3
; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GFX12DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX12DAGISEL-NEXT: ; %bb.3: ; %if
; GFX12DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
@@ -1448,7 +2326,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX12DAGISEL-NEXT: s_wait_alu depctr_va_sdst(0)
; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12DAGISEL-NEXT: v_mov_b32_e32 v0, s1
-; GFX12DAGISEL-NEXT: .LBB3_4: ; %endif
+; GFX12DAGISEL-NEXT: .LBB4_4: ; %endif
; GFX12DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX12DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX12DAGISEL-NEXT: v_mov_b32_e32 v1, 0
@@ -1710,7 +2588,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX8DAGISEL-NEXT: s_mov_b32 s6, 0
; GFX8DAGISEL-NEXT: s_brev_b32 s7, 1
; GFX8DAGISEL-NEXT: s_mov_b64 s[4:5], exec
-; GFX8DAGISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX8DAGISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s10, s[4:5]
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v4, s6
; GFX8DAGISEL-NEXT: v_readlane_b32 s8, v2, s10
@@ -1721,7 +2599,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX8DAGISEL-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX8DAGISEL-NEXT: v_readfirstlane_b32 s6, v4
; GFX8DAGISEL-NEXT: v_readfirstlane_b32 s7, v5
-; GFX8DAGISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX8DAGISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX8DAGISEL-NEXT: ; %bb.2:
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s6
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s7
@@ -1735,7 +2613,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX8GISEL-NEXT: s_mov_b32 s6, 0
; GFX8GISEL-NEXT: s_brev_b32 s7, 1
; GFX8GISEL-NEXT: s_mov_b64 s[4:5], exec
-; GFX8GISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX8GISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX8GISEL-NEXT: s_ff1_i32_b64 s10, s[4:5]
; GFX8GISEL-NEXT: v_mov_b32_e32 v4, s6
; GFX8GISEL-NEXT: v_readlane_b32 s8, v2, s10
@@ -1746,7 +2624,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX8GISEL-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX8GISEL-NEXT: v_readfirstlane_b32 s6, v4
; GFX8GISEL-NEXT: v_readfirstlane_b32 s7, v5
-; GFX8GISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX8GISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX8GISEL-NEXT: ; %bb.2:
; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s6
; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s7
@@ -1760,7 +2638,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX9DAGISEL-NEXT: s_mov_b32 s6, 0
; GFX9DAGISEL-NEXT: s_brev_b32 s7, 1
; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], exec
-; GFX9DAGISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX9DAGISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s10, s[4:5]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v4, s6
; GFX9DAGISEL-NEXT: v_readlane_b32 s8, v2, s10
@@ -1771,7 +2649,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX9DAGISEL-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX9DAGISEL-NEXT: v_readfirstlane_b32 s6, v4
; GFX9DAGISEL-NEXT: v_readfirstlane_b32 s7, v5
-; GFX9DAGISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX9DAGISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX9DAGISEL-NEXT: ; %bb.2:
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, s6
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v3, s7
@@ -1785,7 +2663,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX9GISEL-NEXT: s_mov_b32 s6, 0
; GFX9GISEL-NEXT: s_brev_b32 s7, 1
; GFX9GISEL-NEXT: s_mov_b64 s[4:5], exec
-; GFX9GISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX9GISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX9GISEL-NEXT: s_ff1_i32_b64 s10, s[4:5]
; GFX9GISEL-NEXT: v_mov_b32_e32 v4, s6
; GFX9GISEL-NEXT: v_readlane_b32 s8, v2, s10
@@ -1796,7 +2674,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX9GISEL-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX9GISEL-NEXT: v_readfirstlane_b32 s6, v4
; GFX9GISEL-NEXT: v_readfirstlane_b32 s7, v5
-; GFX9GISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX9GISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX9GISEL-NEXT: ; %bb.2:
; GFX9GISEL-NEXT: v_mov_b32_e32 v2, s6
; GFX9GISEL-NEXT: v_mov_b32_e32 v3, s7
@@ -1810,7 +2688,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1064DAGISEL-NEXT: s_mov_b32 s6, 0
; GFX1064DAGISEL-NEXT: s_brev_b32 s7, 1
; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], exec
-; GFX1064DAGISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064DAGISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s10, s[4:5]
; GFX1064DAGISEL-NEXT: v_readlane_b32 s8, v2, s10
; GFX1064DAGISEL-NEXT: v_readlane_b32 s9, v3, s10
@@ -1819,7 +2697,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1064DAGISEL-NEXT: v_add_f64 v[4:5], s[8:9], s[6:7]
; GFX1064DAGISEL-NEXT: v_readfirstlane_b32 s6, v4
; GFX1064DAGISEL-NEXT: v_readfirstlane_b32 s7, v5
-; GFX1064DAGISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX1064DAGISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1064DAGISEL-NEXT: ; %bb.2:
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, s6
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v3, s7
@@ -1832,7 +2710,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1064GISEL-NEXT: s_mov_b32 s6, 0
; GFX1064GISEL-NEXT: s_brev_b32 s7, 1
; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], exec
-; GFX1064GISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064GISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX1064GISEL-NEXT: s_ff1_i32_b64 s10, s[4:5]
; GFX1064GISEL-NEXT: v_readlane_b32 s8, v2, s10
; GFX1064GISEL-NEXT: v_readlane_b32 s9, v3, s10
@@ -1841,7 +2719,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1064GISEL-NEXT: v_add_f64 v[4:5], s[8:9], s[6:7]
; GFX1064GISEL-NEXT: v_readfirstlane_b32 s6, v4
; GFX1064GISEL-NEXT: v_readfirstlane_b32 s7, v5
-; GFX1064GISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX1064GISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1064GISEL-NEXT: ; %bb.2:
; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, s6
; GFX1064GISEL-NEXT: v_mov_b32_e32 v3, s7
@@ -1854,7 +2732,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1032DAGISEL-NEXT: s_mov_b32 s4, 0
; GFX1032DAGISEL-NEXT: s_brev_b32 s5, 1
; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
-; GFX1032DAGISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032DAGISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032DAGISEL-NEXT: v_readlane_b32 s8, v2, s7
; GFX1032DAGISEL-NEXT: v_readlane_b32 s9, v3, s7
@@ -1863,7 +2741,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1032DAGISEL-NEXT: v_add_f64 v[4:5], s[8:9], s[4:5]
; GFX1032DAGISEL-NEXT: v_readfirstlane_b32 s4, v4
; GFX1032DAGISEL-NEXT: v_readfirstlane_b32 s5, v5
-; GFX1032DAGISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX1032DAGISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1032DAGISEL-NEXT: ; %bb.2:
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, s4
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v3, s5
@@ -1876,7 +2754,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1032GISEL-NEXT: s_mov_b32 s4, 0
; GFX1032GISEL-NEXT: s_brev_b32 s5, 1
; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
-; GFX1032GISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032GISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032GISEL-NEXT: v_readlane_b32 s8, v2, s7
; GFX1032GISEL-NEXT: v_readlane_b32 s9, v3, s7
@@ -1885,7 +2763,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1032GISEL-NEXT: v_add_f64 v[4:5], s[8:9], s[4:5]
; GFX1032GISEL-NEXT: v_readfirstlane_b32 s4, v4
; GFX1032GISEL-NEXT: v_readfirstlane_b32 s5, v5
-; GFX1032GISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX1032GISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1032GISEL-NEXT: ; %bb.2:
; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, s4
; GFX1032GISEL-NEXT: v_mov_b32_e32 v3, s5
@@ -1898,7 +2776,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1164DAGISEL-NEXT: s_mov_b32 s2, 0
; GFX1164DAGISEL-NEXT: s_brev_b32 s3, 1
; GFX1164DAGISEL-NEXT: s_mov_b64 s[0:1], exec
-; GFX1164DAGISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s6, s[0:1]
; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s6
@@ -1910,7 +2788,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1164DAGISEL-NEXT: v_readfirstlane_b32 s2, v4
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1164DAGISEL-NEXT: v_readfirstlane_b32 s3, v5
-; GFX1164DAGISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX1164DAGISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1164DAGISEL-NEXT: ; %bb.2:
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, s2
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v3, s3
@@ -1923,7 +2801,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1164GISEL-NEXT: s_mov_b32 s2, 0
; GFX1164GISEL-NEXT: s_brev_b32 s3, 1
; GFX1164GISEL-NEXT: s_mov_b64 s[0:1], exec
-; GFX1164GISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164GISEL-NEXT: s_ctz_i32_b64 s6, s[0:1]
; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s6
@@ -1935,7 +2813,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1164GISEL-NEXT: v_readfirstlane_b32 s2, v4
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1164GISEL-NEXT: v_readfirstlane_b32 s3, v5
-; GFX1164GISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX1164GISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1164GISEL-NEXT: ; %bb.2:
; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, s2
; GFX1164GISEL-NEXT: v_mov_b32_e32 v3, s3
@@ -1948,7 +2826,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1132DAGISEL-NEXT: s_mov_b32 s0, 0
; GFX1132DAGISEL-NEXT: s_brev_b32 s1, 1
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
@@ -1960,7 +2838,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1132DAGISEL-NEXT: v_readfirstlane_b32 s0, v4
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1132DAGISEL-NEXT: v_readfirstlane_b32 s1, v5
-; GFX1132DAGISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX1132DAGISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1132DAGISEL-NEXT: ; %bb.2:
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX1132DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
@@ -1972,7 +2850,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1132GISEL-NEXT: s_mov_b32 s0, 0
; GFX1132GISEL-NEXT: s_brev_b32 s1, 1
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
@@ -1984,7 +2862,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1132GISEL-NEXT: v_readfirstlane_b32 s0, v4
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1132GISEL-NEXT: v_readfirstlane_b32 s1, v5
-; GFX1132GISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX1132GISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1132GISEL-NEXT: ; %bb.2:
; GFX1132GISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX1132GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
@@ -2000,7 +2878,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX12DAGISEL-NEXT: s_mov_b32 s0, 0
; GFX12DAGISEL-NEXT: s_brev_b32 s1, 1
; GFX12DAGISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX12DAGISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX12DAGISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GFX12DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
@@ -2013,7 +2891,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12DAGISEL-NEXT: v_readfirstlane_b32 s0, v4
; GFX12DAGISEL-NEXT: v_readfirstlane_b32 s1, v5
-; GFX12DAGISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX12DAGISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX12DAGISEL-NEXT: ; %bb.2:
; GFX12DAGISEL-NEXT: s_wait_alu depctr_va_sdst(0)
; GFX12DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
@@ -2034,7 +2912,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX8DAGISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
; GFX8DAGISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
-; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX8DAGISEL-NEXT: ; %bb.1: ; %else
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -2043,13 +2921,13 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX8DAGISEL-NEXT: v_mul_f64 v[0:1], s[2:3], v[0:1]
; GFX8DAGISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX8DAGISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX8DAGISEL-NEXT: .LBB6_2: ; %Flow
+; GFX8DAGISEL-NEXT: .LBB7_2: ; %Flow
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[8:9]
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s6
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s7
; GFX8DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
-; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX8DAGISEL-NEXT: ; %bb.3: ; %if
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -2059,7 +2937,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX8DAGISEL-NEXT: v_readfirstlane_b32 s5, v1
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s5
-; GFX8DAGISEL-NEXT: .LBB6_4: ; %endif
+; GFX8DAGISEL-NEXT: .LBB7_4: ; %endif
; GFX8DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
@@ -2073,7 +2951,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX8GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
; GFX8GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
; GFX8GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
-; GFX8GISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX8GISEL-NEXT: ; %bb.1: ; %else
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -2082,10 +2960,10 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX8GISEL-NEXT: v_mul_f64 v[0:1], s[2:3], v[0:1]
; GFX8GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX8GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX8GISEL-NEXT: .LBB6_2: ; %Flow
+; GFX8GISEL-NEXT: .LBB7_2: ; %Flow
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
-; GFX8GISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX8GISEL-NEXT: ; %bb.3: ; %if
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8GISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
@@ -2095,7 +2973,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX8GISEL-NEXT: v_mul_f64 v[0:1], s[4:5], v[0:1]
; GFX8GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX8GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX8GISEL-NEXT: .LBB6_4: ; %endif
+; GFX8GISEL-NEXT: .LBB7_4: ; %endif
; GFX8GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s6
; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
@@ -2112,7 +2990,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX9DAGISEL-NEXT: ; implicit-def: $sgpr4_sgpr5
; GFX9DAGISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
; GFX9DAGISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
-; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX9DAGISEL-NEXT: ; %bb.1: ; %else
; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], exec
; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
@@ -2121,13 +2999,13 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX9DAGISEL-NEXT: v_mul_f64 v[0:1], s[2:3], v[0:1]
; GFX9DAGISEL-NEXT: v_readfirstlane_b32 s4, v0
; GFX9DAGISEL-NEXT: v_readfirstlane_b32 s5, v1
-; GFX9DAGISEL-NEXT: .LBB6_2: ; %Flow
+; GFX9DAGISEL-NEXT: .LBB7_2: ; %Flow
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[8:9]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s5
; GFX9DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
-; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX9DAGISEL-NEXT: ; %bb.3: ; %if
; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], exec
; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
@@ -2137,7 +3015,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX9DAGISEL-NEXT: v_readfirstlane_b32 s5, v1
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s5
-; GFX9DAGISEL-NEXT: .LBB6_4: ; %endif
+; GFX9DAGISEL-NEXT: .LBB7_4: ; %endif
; GFX9DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -2150,7 +3028,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX9GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
; GFX9GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
; GFX9GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
-; GFX9GISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX9GISEL-NEXT: ; %bb.1: ; %else
; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -2159,10 +3037,10 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX9GISEL-NEXT: v_mul_f64 v[0:1], s[2:3], v[0:1]
; GFX9GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX9GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9GISEL-NEXT: .LBB6_2: ; %Flow
+; GFX9GISEL-NEXT: .LBB7_2: ; %Flow
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
-; GFX9GISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX9GISEL-NEXT: ; %bb.3: ; %if
; GFX9GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9GISEL-NEXT: s_mov_b64 s[4:5], exec
@@ -2172,7 +3050,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX9GISEL-NEXT: v_mul_f64 v[0:1], s[6:7], v[0:1]
; GFX9GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX9GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9GISEL-NEXT: .LBB6_4: ; %endif
+; GFX9GISEL-NEXT: .LBB7_4: ; %endif
; GFX9GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s6
; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s7
@@ -2189,7 +3067,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1064DAGISEL-NEXT: ; implicit-def: $sgpr8_sgpr9
; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX1064DAGISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX1064DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1064DAGISEL-NEXT: s_mov_b64 s[8:9], exec
; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s8, s[8:9]
@@ -2198,13 +3076,13 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1064DAGISEL-NEXT: v_mul_f64 v[0:1], s[2:3], v[0:1]
; GFX1064DAGISEL-NEXT: v_readfirstlane_b32 s8, v0
; GFX1064DAGISEL-NEXT: v_readfirstlane_b32 s9, v1
-; GFX1064DAGISEL-NEXT: .LBB6_2: ; %Flow
+; GFX1064DAGISEL-NEXT: .LBB7_2: ; %Flow
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[4:5]
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s8
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s9
; GFX1064DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
-; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX1064DAGISEL-NEXT: ; %bb.3: ; %if
; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], exec
; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
@@ -2214,7 +3092,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1064DAGISEL-NEXT: v_readfirstlane_b32 s5, v1
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s5
-; GFX1064DAGISEL-NEXT: .LBB6_4: ; %endif
+; GFX1064DAGISEL-NEXT: .LBB7_4: ; %endif
; GFX1064DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -2227,7 +3105,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
; GFX1064GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
-; GFX1064GISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX1064GISEL-NEXT: ; %bb.1: ; %else
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -2236,10 +3114,10 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1064GISEL-NEXT: v_mul_f64 v[0:1], s[2:3], v[0:1]
; GFX1064GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX1064GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX1064GISEL-NEXT: .LBB6_2: ; %Flow
+; GFX1064GISEL-NEXT: .LBB7_2: ; %Flow
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
-; GFX1064GISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX1064GISEL-NEXT: ; %bb.3: ; %if
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -2249,7 +3127,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1064GISEL-NEXT: v_mul_f64 v[0:1], s[6:7], v[0:1]
; GFX1064GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX1064GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX1064GISEL-NEXT: .LBB6_4: ; %endif
+; GFX1064GISEL-NEXT: .LBB7_4: ; %endif
; GFX1064GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s6
; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s7
@@ -2266,7 +3144,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1032DAGISEL-NEXT: ; implicit-def: $sgpr4_sgpr5
; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s8, vcc_lo
; GFX1032DAGISEL-NEXT: s_xor_b32 s8, exec_lo, s8
-; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX1032DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1032DAGISEL-NEXT: s_mov_b32 s4, exec_lo
; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s4, s4
@@ -2275,13 +3153,13 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1032DAGISEL-NEXT: v_mul_f64 v[0:1], s[2:3], v[0:1]
; GFX1032DAGISEL-NEXT: v_readfirstlane_b32 s4, v0
; GFX1032DAGISEL-NEXT: v_readfirstlane_b32 s5, v1
-; GFX1032DAGISEL-NEXT: .LBB6_2: ; %Flow
+; GFX1032DAGISEL-NEXT: .LBB7_2: ; %Flow
; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s2, s8
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s5
; GFX1032DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s2
-; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX1032DAGISEL-NEXT: ; %bb.3: ; %if
; GFX1032DAGISEL-NEXT: s_mov_b32 s3, exec_lo
; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s3, s3
@@ -2291,7 +3169,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1032DAGISEL-NEXT: v_readfirstlane_b32 s5, v1
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s5
-; GFX1032DAGISEL-NEXT: .LBB6_4: ; %endif
+; GFX1032DAGISEL-NEXT: .LBB7_4: ; %endif
; GFX1032DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -2304,7 +3182,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1032GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
; GFX1032GISEL-NEXT: s_and_saveexec_b32 s8, vcc_lo
; GFX1032GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
-; GFX1032GISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX1032GISEL-NEXT: ; %bb.1: ; %else
; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s6, s6
@@ -2313,10 +3191,10 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1032GISEL-NEXT: v_mul_f64 v[0:1], s[2:3], v[0:1]
; GFX1032GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX1032GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX1032GISEL-NEXT: .LBB6_2: ; %Flow
+; GFX1032GISEL-NEXT: .LBB7_2: ; %Flow
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s2, s8
-; GFX1032GISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX1032GISEL-NEXT: ; %bb.3: ; %if
; GFX1032GISEL-NEXT: s_mov_b32 s3, exec_lo
; GFX1032GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
@@ -2326,7 +3204,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1032GISEL-NEXT: v_mul_f64 v[0:1], s[6:7], v[0:1]
; GFX1032GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX1032GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX1032GISEL-NEXT: .LBB6_4: ; %endif
+; GFX1032GISEL-NEXT: .LBB7_4: ; %endif
; GFX1032GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s6
; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s7
@@ -2345,7 +3223,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX1164DAGISEL-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
-; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX1164DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1164DAGISEL-NEXT: s_mov_b64 s[8:9], exec
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -2357,14 +3235,14 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1164DAGISEL-NEXT: v_readfirstlane_b32 s8, v0
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1164DAGISEL-NEXT: v_readfirstlane_b32 s9, v1
-; GFX1164DAGISEL-NEXT: .LBB6_2: ; %Flow
+; GFX1164DAGISEL-NEXT: .LBB7_2: ; %Flow
; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[6:7]
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s8
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s9
; GFX1164DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
-; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX1164DAGISEL-NEXT: ; %bb.3: ; %if
; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -2378,7 +3256,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s5
-; GFX1164DAGISEL-NEXT: .LBB6_4: ; %endif
+; GFX1164DAGISEL-NEXT: .LBB7_4: ; %endif
; GFX1164DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -2393,7 +3271,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
; GFX1164GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
-; GFX1164GISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX1164GISEL-NEXT: ; %bb.1: ; %else
; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -2405,10 +3283,10 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1164GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1164GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX1164GISEL-NEXT: .LBB6_2: ; %Flow
+; GFX1164GISEL-NEXT: .LBB7_2: ; %Flow
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[2:3], s[8:9]
-; GFX1164GISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX1164GISEL-NEXT: ; %bb.3: ; %if
; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1164GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
@@ -2420,7 +3298,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1164GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX1164GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX1164GISEL-NEXT: .LBB6_4: ; %endif
+; GFX1164GISEL-NEXT: .LBB7_4: ; %endif
; GFX1164GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s6
@@ -2440,7 +3318,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX1132DAGISEL-NEXT: s_xor_b32 s8, exec_lo, s8
-; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX1132DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1132DAGISEL-NEXT: s_mov_b32 s6, exec_lo
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -2452,13 +3330,13 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1132DAGISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1132DAGISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX1132DAGISEL-NEXT: .LBB6_2: ; %Flow
+; GFX1132DAGISEL-NEXT: .LBB7_2: ; %Flow
; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s2, s8
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
; GFX1132DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s2
-; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX1132DAGISEL-NEXT: ; %bb.3: ; %if
; GFX1132DAGISEL-NEXT: s_mov_b32 s3, exec_lo
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -2470,7 +3348,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_readfirstlane_b32 s5, v1
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX1132DAGISEL-NEXT: .LBB6_4: ; %endif
+; GFX1132DAGISEL-NEXT: .LBB7_4: ; %endif
; GFX1132DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -2485,7 +3363,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
; GFX1132GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
-; GFX1132GISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX1132GISEL-NEXT: ; %bb.1: ; %else
; GFX1132GISEL-NEXT: s_mov_b32 s6, exec_lo
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -2497,10 +3375,10 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1132GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1132GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX1132GISEL-NEXT: .LBB6_2: ; %Flow
+; GFX1132GISEL-NEXT: .LBB7_2: ; %Flow
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s2, s8
-; GFX1132GISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX1132GISEL-NEXT: ; %bb.3: ; %if
; GFX1132GISEL-NEXT: s_mov_b32 s3, exec_lo
; GFX1132GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
@@ -2512,7 +3390,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1132GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX1132GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX1132GISEL-NEXT: .LBB6_4: ; %endif
+; GFX1132GISEL-NEXT: .LBB7_4: ; %endif
; GFX1132GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
@@ -2531,7 +3409,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX12DAGISEL-NEXT: s_xor_b32 s8, exec_lo, s8
-; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX12DAGISEL-NEXT: ; %bb.1: ; %else
; GFX12DAGISEL-NEXT: s_mov_b32 s6, exec_lo
; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -2543,7 +3421,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX12DAGISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX12DAGISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX12DAGISEL-NEXT: .LBB6_2: ; %Flow
+; GFX12DAGISEL-NEXT: .LBB7_2: ; %Flow
; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
; GFX12DAGISEL-NEXT: s_or_saveexec_b32 s2, s8
; GFX12DAGISEL-NEXT: s_wait_alu depctr_va_sdst(0)
@@ -2551,7 +3429,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX12DAGISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GFX12DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s2
-; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX12DAGISEL-NEXT: ; %bb.3: ; %if
; GFX12DAGISEL-NEXT: s_mov_b32 s3, exec_lo
; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
@@ -2565,7 +3443,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX12DAGISEL-NEXT: v_readfirstlane_b32 s5, v1
; GFX12DAGISEL-NEXT: s_wait_alu depctr_va_sdst(0)
; GFX12DAGISEL-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX12DAGISEL-NEXT: .LBB6_4: ; %endif
+; GFX12DAGISEL-NEXT: .LBB7_4: ; %endif
; GFX12DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX12DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX12DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fsub.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fsub.ll
index 732d103951b8f..67e470cf2ea64 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fsub.ll
@@ -961,6 +961,906 @@ entry:
ret void
}
+define void @divergent_value_double_dpp(ptr addrspace(1) %out, double %in) {
+; GFX8DAGISEL-LABEL: divergent_value_double_dpp:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX8DAGISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX8DAGISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX8DAGISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX8DAGISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX8DAGISEL-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX8DAGISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX8DAGISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX8DAGISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX8DAGISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX8DAGISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[4:5], -1
+; GFX8DAGISEL-NEXT: v_bfrev_b32_e32 v4, 1
+; GFX8DAGISEL-NEXT: v_cndmask_b32_e64 v5, 0, v2, s[4:5]
+; GFX8DAGISEL-NEXT: v_cndmask_b32_e64 v6, v4, v3, s[4:5]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v8, v6
+; GFX8DAGISEL-NEXT: s_nop 0
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v8, v8 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_add_f64 v[4:5], v[5:6], v[7:8]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8DAGISEL-NEXT: s_nop 0
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8DAGISEL-NEXT: s_nop 0
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8DAGISEL-NEXT: s_nop 0
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8DAGISEL-NEXT: s_nop 0
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_bcast:15 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_bcast:15 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8DAGISEL-NEXT: s_nop 0
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_bcast:31 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_bcast:31 row_mask:0xf bank_mask:0xf
+; GFX8DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v6, 0
+; GFX8DAGISEL-NEXT: v_bfrev_b32_e32 v7, 1
+; GFX8DAGISEL-NEXT: v_add_f64 v[4:5], v[6:7], -v[4:5]
+; GFX8DAGISEL-NEXT: v_readlane_b32 s6, v4, 63
+; GFX8DAGISEL-NEXT: v_readlane_b32 s7, v5, 63
+; GFX8DAGISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s6
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s7
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX8DAGISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX8DAGISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX8DAGISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX8DAGISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX8DAGISEL-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX8DAGISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX8DAGISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX8DAGISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX8DAGISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX8DAGISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8GISEL-LABEL: divergent_value_double_dpp:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX8GISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX8GISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX8GISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX8GISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX8GISEL-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX8GISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX8GISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX8GISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX8GISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX8GISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX8GISEL-NEXT: s_or_saveexec_b64 s[4:5], -1
+; GFX8GISEL-NEXT: v_bfrev_b32_e32 v4, 1
+; GFX8GISEL-NEXT: v_cndmask_b32_e64 v5, 0, v2, s[4:5]
+; GFX8GISEL-NEXT: v_cndmask_b32_e64 v6, v4, v3, s[4:5]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8GISEL-NEXT: v_mov_b32_e32 v8, v6
+; GFX8GISEL-NEXT: s_nop 0
+; GFX8GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_mov_b32_dpp v8, v8 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_add_f64 v[4:5], v[5:6], v[7:8]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8GISEL-NEXT: s_nop 0
+; GFX8GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8GISEL-NEXT: s_nop 0
+; GFX8GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8GISEL-NEXT: s_nop 0
+; GFX8GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8GISEL-NEXT: s_nop 0
+; GFX8GISEL-NEXT: v_mov_b32_dpp v6, v6 row_bcast:15 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_mov_b32_dpp v7, v7 row_bcast:15 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX8GISEL-NEXT: s_nop 0
+; GFX8GISEL-NEXT: v_mov_b32_dpp v6, v6 row_bcast:31 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_mov_b32_dpp v7, v7 row_bcast:31 row_mask:0xf bank_mask:0xf
+; GFX8GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v6, 0
+; GFX8GISEL-NEXT: v_bfrev_b32_e32 v7, 1
+; GFX8GISEL-NEXT: v_add_f64 v[4:5], v[6:7], -v[4:5]
+; GFX8GISEL-NEXT: v_readlane_b32 s6, v4, 63
+; GFX8GISEL-NEXT: v_readlane_b32 s7, v5, 63
+; GFX8GISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8GISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX8GISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX8GISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX8GISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX8GISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX8GISEL-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX8GISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX8GISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX8GISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX8GISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX8GISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9DAGISEL-LABEL: divergent_value_double_dpp:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9DAGISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9DAGISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9DAGISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9DAGISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9DAGISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9DAGISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[4:5], -1
+; GFX9DAGISEL-NEXT: v_bfrev_b32_e32 v4, 1
+; GFX9DAGISEL-NEXT: v_cndmask_b32_e64 v5, 0, v2, s[4:5]
+; GFX9DAGISEL-NEXT: v_cndmask_b32_e64 v6, v4, v3, s[4:5]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v8, v6
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v8, v8 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_add_f64 v[4:5], v[5:6], v[7:8]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_bcast:15 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_bcast:15 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9DAGISEL-NEXT: s_nop 0
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_bcast:31 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_bcast:31 row_mask:0xf bank_mask:0xf
+; GFX9DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v6, 0
+; GFX9DAGISEL-NEXT: v_bfrev_b32_e32 v7, 1
+; GFX9DAGISEL-NEXT: v_add_f64 v[4:5], v[6:7], -v[4:5]
+; GFX9DAGISEL-NEXT: v_readlane_b32 s6, v4, 63
+; GFX9DAGISEL-NEXT: v_readlane_b32 s7, v5, 63
+; GFX9DAGISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, s6
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v3, s7
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9DAGISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9DAGISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9DAGISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9DAGISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9DAGISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9DAGISEL-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX9DAGISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9DAGISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9DAGISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9DAGISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9DAGISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9GISEL-LABEL: divergent_value_double_dpp:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9GISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9GISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9GISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9GISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9GISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9GISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9GISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9GISEL-NEXT: s_or_saveexec_b64 s[4:5], -1
+; GFX9GISEL-NEXT: v_bfrev_b32_e32 v4, 1
+; GFX9GISEL-NEXT: v_cndmask_b32_e64 v5, 0, v2, s[4:5]
+; GFX9GISEL-NEXT: v_cndmask_b32_e64 v6, v4, v3, s[4:5]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9GISEL-NEXT: v_mov_b32_e32 v8, v6
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_mov_b32_dpp v8, v8 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_add_f64 v[4:5], v[5:6], v[7:8]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: v_mov_b32_dpp v6, v6 row_bcast:15 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_mov_b32_dpp v7, v7 row_bcast:15 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX9GISEL-NEXT: s_nop 0
+; GFX9GISEL-NEXT: v_mov_b32_dpp v6, v6 row_bcast:31 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_mov_b32_dpp v7, v7 row_bcast:31 row_mask:0xf bank_mask:0xf
+; GFX9GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v6, 0
+; GFX9GISEL-NEXT: v_bfrev_b32_e32 v7, 1
+; GFX9GISEL-NEXT: v_add_f64 v[4:5], v[6:7], -v[4:5]
+; GFX9GISEL-NEXT: v_readlane_b32 s6, v4, 63
+; GFX9GISEL-NEXT: v_readlane_b32 s7, v5, 63
+; GFX9GISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GFX9GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GFX9GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9GISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9GISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9GISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9GISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9GISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9GISEL-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX9GISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9GISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9GISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9GISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9GISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1064DAGISEL-LABEL: divergent_value_double_dpp:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX1064DAGISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX1064DAGISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX1064DAGISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX1064DAGISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX1064DAGISEL-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX1064DAGISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX1064DAGISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX1064DAGISEL-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX1064DAGISEL-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1064DAGISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[4:5], -1
+; GFX1064DAGISEL-NEXT: v_cndmask_b32_e64 v4, 0, v2, s[4:5]
+; GFX1064DAGISEL-NEXT: v_cndmask_b32_e64 v5, 0x80000000, v3, s[4:5]
+; GFX1064DAGISEL-NEXT: v_mbcnt_lo_u32_b32 v8, -1, 0
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1064DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1064DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1064DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1064DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1064DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1064DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1064DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1064DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1064DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1064DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1064DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1064DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064DAGISEL-NEXT: ds_swizzle_b32 v6, v4 offset:swizzle(BROADCAST,32,15)
+; GFX1064DAGISEL-NEXT: ds_swizzle_b32 v7, v5 offset:swizzle(BROADCAST,32,15)
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064DAGISEL-NEXT: v_mbcnt_hi_u32_b32 v6, -1, v8
+; GFX1064DAGISEL-NEXT: v_add_nc_u32_e32 v6, 32, v6
+; GFX1064DAGISEL-NEXT: v_mul_lo_u32 v6, 4, v6
+; GFX1064DAGISEL-NEXT: ds_permute_b32 v7, v6, v4
+; GFX1064DAGISEL-NEXT: ds_permute_b32 v8, v6, v5
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[7:8]
+; GFX1064DAGISEL-NEXT: v_add_f64 v[4:5], 0x80000000, -v[4:5]
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s6, v4, 63
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s7, v5, 63
+; GFX1064DAGISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, s6
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v3, s7
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064DAGISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX1064DAGISEL-NEXT: s_clause 0x7 ; 32-byte Folded Reload
+; GFX1064DAGISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32
+; GFX1064DAGISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:4
+; GFX1064DAGISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8
+; GFX1064DAGISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:12
+; GFX1064DAGISEL-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16
+; GFX1064DAGISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:20
+; GFX1064DAGISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:24
+; GFX1064DAGISEL-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:28
+; GFX1064DAGISEL-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1064DAGISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX1064DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1064GISEL-LABEL: divergent_value_double_dpp:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX1064GISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX1064GISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX1064GISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX1064GISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX1064GISEL-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX1064GISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX1064GISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX1064GISEL-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX1064GISEL-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1064GISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX1064GISEL-NEXT: s_or_saveexec_b64 s[4:5], -1
+; GFX1064GISEL-NEXT: v_cndmask_b32_e64 v4, 0, v2, s[4:5]
+; GFX1064GISEL-NEXT: v_cndmask_b32_e64 v5, 0x80000000, v3, s[4:5]
+; GFX1064GISEL-NEXT: v_mbcnt_lo_u32_b32 v8, -1, 0
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1064GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1064GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1064GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1064GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1064GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1064GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1064GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1064GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1064GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1064GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1064GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1064GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064GISEL-NEXT: ds_swizzle_b32 v6, v4 offset:swizzle(BROADCAST,32,15)
+; GFX1064GISEL-NEXT: ds_swizzle_b32 v7, v5 offset:swizzle(BROADCAST,32,15)
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1064GISEL-NEXT: v_mbcnt_hi_u32_b32 v6, -1, v8
+; GFX1064GISEL-NEXT: v_add_nc_u32_e32 v6, 32, v6
+; GFX1064GISEL-NEXT: v_mul_lo_u32 v6, 4, v6
+; GFX1064GISEL-NEXT: ds_permute_b32 v7, v6, v4
+; GFX1064GISEL-NEXT: ds_permute_b32 v8, v6, v5
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[7:8]
+; GFX1064GISEL-NEXT: v_add_f64 v[4:5], 0x80000000, -v[4:5]
+; GFX1064GISEL-NEXT: v_readlane_b32 s6, v4, 63
+; GFX1064GISEL-NEXT: v_readlane_b32 s7, v5, 63
+; GFX1064GISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GFX1064GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064GISEL-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX1064GISEL-NEXT: s_clause 0x7 ; 32-byte Folded Reload
+; GFX1064GISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32
+; GFX1064GISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:4
+; GFX1064GISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8
+; GFX1064GISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:12
+; GFX1064GISEL-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16
+; GFX1064GISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:20
+; GFX1064GISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:24
+; GFX1064GISEL-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:28
+; GFX1064GISEL-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1064GISEL-NEXT: s_mov_b64 exec, s[4:5]
+; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX1064GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032DAGISEL-LABEL: divergent_value_double_dpp:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_xor_saveexec_b32 s4, -1
+; GFX1032DAGISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX1032DAGISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX1032DAGISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX1032DAGISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX1032DAGISEL-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1032DAGISEL-NEXT: s_mov_b32 exec_lo, s4
+; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s6, -1
+; GFX1032DAGISEL-NEXT: v_cndmask_b32_e64 v4, 0, v2, s6
+; GFX1032DAGISEL-NEXT: v_cndmask_b32_e64 v5, 0x80000000, v3, s6
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1032DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1032DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1032DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1032DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1032DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1032DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1032DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1032DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1032DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1032DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1032DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1032DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032DAGISEL-NEXT: ds_swizzle_b32 v6, v4 offset:swizzle(BROADCAST,32,15)
+; GFX1032DAGISEL-NEXT: ds_swizzle_b32 v7, v5 offset:swizzle(BROADCAST,32,15)
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032DAGISEL-NEXT: v_add_f64 v[4:5], 0x80000000, -v[4:5]
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s4, v4, 31
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s5, v5, 31
+; GFX1032DAGISEL-NEXT: s_mov_b32 exec_lo, s6
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032DAGISEL-NEXT: s_xor_saveexec_b32 s4, -1
+; GFX1032DAGISEL-NEXT: s_clause 0x3 ; 16-byte Folded Reload
+; GFX1032DAGISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32
+; GFX1032DAGISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:4
+; GFX1032DAGISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8
+; GFX1032DAGISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:12
+; GFX1032DAGISEL-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1032DAGISEL-NEXT: s_mov_b32 exec_lo, s4
+; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX1032DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032GISEL-LABEL: divergent_value_double_dpp:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_xor_saveexec_b32 s4, -1
+; GFX1032GISEL-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX1032GISEL-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX1032GISEL-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX1032GISEL-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX1032GISEL-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1032GISEL-NEXT: s_mov_b32 exec_lo, s4
+; GFX1032GISEL-NEXT: s_or_saveexec_b32 s6, -1
+; GFX1032GISEL-NEXT: v_cndmask_b32_e64 v4, 0, v2, s6
+; GFX1032GISEL-NEXT: v_cndmask_b32_e64 v5, 0x80000000, v3, s6
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1032GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1032GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1032GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1032GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1032GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1032GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1032GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1032GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1032GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1032GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1032GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1032GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032GISEL-NEXT: ds_swizzle_b32 v6, v4 offset:swizzle(BROADCAST,32,15)
+; GFX1032GISEL-NEXT: ds_swizzle_b32 v7, v5 offset:swizzle(BROADCAST,32,15)
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1032GISEL-NEXT: v_add_f64 v[4:5], 0x80000000, -v[4:5]
+; GFX1032GISEL-NEXT: v_readlane_b32 s4, v4, 31
+; GFX1032GISEL-NEXT: v_readlane_b32 s5, v5, 31
+; GFX1032GISEL-NEXT: s_mov_b32 exec_lo, s6
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032GISEL-NEXT: s_xor_saveexec_b32 s4, -1
+; GFX1032GISEL-NEXT: s_clause 0x3 ; 16-byte Folded Reload
+; GFX1032GISEL-NEXT: buffer_load_dword v4, off, s[0:3], s32
+; GFX1032GISEL-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:4
+; GFX1032GISEL-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8
+; GFX1032GISEL-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:12
+; GFX1032GISEL-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1032GISEL-NEXT: s_mov_b32 exec_lo, s4
+; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX1032GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1164DAGISEL-LABEL: divergent_value_double_dpp:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_xor_saveexec_b64 s[0:1], -1
+; GFX1164DAGISEL-NEXT: s_clause 0x3 ; 28-byte Folded Spill
+; GFX1164DAGISEL-NEXT: scratch_store_b64 off, v[4:5], s32
+; GFX1164DAGISEL-NEXT: scratch_store_b64 off, v[6:7], s32 offset:8
+; GFX1164DAGISEL-NEXT: scratch_store_b32 off, v6, s32 offset:16
+; GFX1164DAGISEL-NEXT: scratch_store_b64 off, v[7:8], s32 offset:20
+; GFX1164DAGISEL-NEXT: s_mov_b64 exec, s[0:1]
+; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], -1
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT: v_cndmask_b32_e64 v4, 0, v2, s[0:1]
+; GFX1164DAGISEL-NEXT: v_cndmask_b32_e64 v5, 0x80000000, v3, s[0:1]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1164DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1164DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1164DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1164DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1164DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1164DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164DAGISEL-NEXT: ds_swizzle_b32 v6, v4 offset:swizzle(BROADCAST,32,15)
+; GFX1164DAGISEL-NEXT: ds_swizzle_b32 v7, v5 offset:swizzle(BROADCAST,32,15)
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164DAGISEL-NEXT: v_mbcnt_lo_u32_b32 v6, -1, 0
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: v_mbcnt_hi_u32_b32 v6, -1, v6
+; GFX1164DAGISEL-NEXT: v_add_nc_u32_e32 v6, 32, v6
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: v_mul_lo_u32 v6, 4, v6
+; GFX1164DAGISEL-NEXT: ds_permute_b32 v7, v6, v4
+; GFX1164DAGISEL-NEXT: ds_permute_b32 v8, v6, v5
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[7:8]
+; GFX1164DAGISEL-NEXT: v_add_f64 v[4:5], 0x80000000, -v[4:5]
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s2, v4, 63
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s3, v5, 63
+; GFX1164DAGISEL-NEXT: s_mov_b64 exec, s[0:1]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164DAGISEL-NEXT: s_xor_saveexec_b64 s[0:1], -1
+; GFX1164DAGISEL-NEXT: s_waitcnt_depctr depctr_sa_sdst(0)
+; GFX1164DAGISEL-NEXT: s_clause 0x3 ; 28-byte Folded Reload
+; GFX1164DAGISEL-NEXT: scratch_load_b64 v[4:5], off, s32
+; GFX1164DAGISEL-NEXT: scratch_load_b64 v[6:7], off, s32 offset:8
+; GFX1164DAGISEL-NEXT: scratch_load_b32 v6, off, s32 offset:16
+; GFX1164DAGISEL-NEXT: scratch_load_b64 v[7:8], off, s32 offset:20
+; GFX1164DAGISEL-NEXT: s_mov_b64 exec, s[0:1]
+; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX1164DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1164GISEL-LABEL: divergent_value_double_dpp:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_xor_saveexec_b64 s[0:1], -1
+; GFX1164GISEL-NEXT: s_clause 0x3 ; 28-byte Folded Spill
+; GFX1164GISEL-NEXT: scratch_store_b64 off, v[4:5], s32
+; GFX1164GISEL-NEXT: scratch_store_b64 off, v[6:7], s32 offset:8
+; GFX1164GISEL-NEXT: scratch_store_b32 off, v6, s32 offset:16
+; GFX1164GISEL-NEXT: scratch_store_b64 off, v[7:8], s32 offset:20
+; GFX1164GISEL-NEXT: s_mov_b64 exec, s[0:1]
+; GFX1164GISEL-NEXT: s_or_saveexec_b64 s[0:1], -1
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT: v_cndmask_b32_e64 v4, 0, v2, s[0:1]
+; GFX1164GISEL-NEXT: v_cndmask_b32_e64 v5, 0x80000000, v3, s[0:1]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1164GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1164GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1164GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1164GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1164GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v6, v4
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1164GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164GISEL-NEXT: ds_swizzle_b32 v6, v4 offset:swizzle(BROADCAST,32,15)
+; GFX1164GISEL-NEXT: ds_swizzle_b32 v7, v5 offset:swizzle(BROADCAST,32,15)
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1164GISEL-NEXT: v_mbcnt_lo_u32_b32 v6, -1, 0
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_mbcnt_hi_u32_b32 v6, -1, v6
+; GFX1164GISEL-NEXT: v_add_nc_u32_e32 v6, 32, v6
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_mul_lo_u32 v6, 4, v6
+; GFX1164GISEL-NEXT: ds_permute_b32 v7, v6, v4
+; GFX1164GISEL-NEXT: ds_permute_b32 v8, v6, v5
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[7:8]
+; GFX1164GISEL-NEXT: v_add_f64 v[4:5], 0x80000000, -v[4:5]
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT: v_readlane_b32 s2, v4, 63
+; GFX1164GISEL-NEXT: v_readlane_b32 s3, v5, 63
+; GFX1164GISEL-NEXT: s_mov_b64 exec, s[0:1]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164GISEL-NEXT: s_xor_saveexec_b64 s[0:1], -1
+; GFX1164GISEL-NEXT: s_waitcnt_depctr depctr_sa_sdst(0)
+; GFX1164GISEL-NEXT: s_clause 0x3 ; 28-byte Folded Reload
+; GFX1164GISEL-NEXT: scratch_load_b64 v[4:5], off, s32
+; GFX1164GISEL-NEXT: scratch_load_b64 v[6:7], off, s32 offset:8
+; GFX1164GISEL-NEXT: scratch_load_b32 v6, off, s32 offset:16
+; GFX1164GISEL-NEXT: scratch_load_b64 v[7:8], off, s32 offset:20
+; GFX1164GISEL-NEXT: s_mov_b64 exec, s[0:1]
+; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX1164GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1132DAGISEL-LABEL: divergent_value_double_dpp:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_xor_saveexec_b32 s0, -1
+; GFX1132DAGISEL-NEXT: s_clause 0x1 ; 16-byte Folded Spill
+; GFX1132DAGISEL-NEXT: scratch_store_b64 off, v[4:5], s32
+; GFX1132DAGISEL-NEXT: scratch_store_b64 off, v[6:7], s32 offset:8
+; GFX1132DAGISEL-NEXT: s_mov_b32 exec_lo, s0
+; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s2, -1
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: v_cndmask_b32_e64 v4, 0, v2, s2
+; GFX1132DAGISEL-NEXT: v_cndmask_b32_e64 v5, 0x80000000, v3, s2
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1132DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1132DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1132DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1132DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132DAGISEL-NEXT: ds_swizzle_b32 v6, v4 offset:swizzle(BROADCAST,32,15)
+; GFX1132DAGISEL-NEXT: ds_swizzle_b32 v7, v5 offset:swizzle(BROADCAST,32,15)
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132DAGISEL-NEXT: v_add_f64 v[4:5], 0x80000000, -v[4:5]
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s0, v4, 31
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s1, v5, 31
+; GFX1132DAGISEL-NEXT: s_mov_b32 exec_lo, s2
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132DAGISEL-NEXT: s_xor_saveexec_b32 s0, -1
+; GFX1132DAGISEL-NEXT: s_clause 0x1 ; 16-byte Folded Reload
+; GFX1132DAGISEL-NEXT: scratch_load_b64 v[4:5], off, s32
+; GFX1132DAGISEL-NEXT: scratch_load_b64 v[6:7], off, s32 offset:8
+; GFX1132DAGISEL-NEXT: s_mov_b32 exec_lo, s0
+; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX1132DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1132GISEL-LABEL: divergent_value_double_dpp:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_xor_saveexec_b32 s0, -1
+; GFX1132GISEL-NEXT: s_clause 0x1 ; 16-byte Folded Spill
+; GFX1132GISEL-NEXT: scratch_store_b64 off, v[4:5], s32
+; GFX1132GISEL-NEXT: scratch_store_b64 off, v[6:7], s32 offset:8
+; GFX1132GISEL-NEXT: s_mov_b32 exec_lo, s0
+; GFX1132GISEL-NEXT: s_or_saveexec_b32 s2, -1
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_cndmask_b32_e64 v4, 0, v2, s2
+; GFX1132GISEL-NEXT: v_cndmask_b32_e64 v5, 0x80000000, v3, s2
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1132GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1132GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1132GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132GISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1132GISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132GISEL-NEXT: ds_swizzle_b32 v6, v4 offset:swizzle(BROADCAST,32,15)
+; GFX1132GISEL-NEXT: ds_swizzle_b32 v7, v5 offset:swizzle(BROADCAST,32,15)
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: v_add_f64 v[4:5], v[4:5], v[6:7]
+; GFX1132GISEL-NEXT: v_add_f64 v[4:5], 0x80000000, -v[4:5]
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132GISEL-NEXT: v_readlane_b32 s0, v4, 31
+; GFX1132GISEL-NEXT: v_readlane_b32 s1, v5, 31
+; GFX1132GISEL-NEXT: s_mov_b32 exec_lo, s2
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132GISEL-NEXT: s_xor_saveexec_b32 s0, -1
+; GFX1132GISEL-NEXT: s_clause 0x1 ; 16-byte Folded Reload
+; GFX1132GISEL-NEXT: scratch_load_b64 v[4:5], off, s32
+; GFX1132GISEL-NEXT: scratch_load_b64 v[6:7], off, s32 offset:8
+; GFX1132GISEL-NEXT: s_mov_b32 exec_lo, s0
+; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX1132GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12DAGISEL-LABEL: divergent_value_double_dpp:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_expcnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_xor_saveexec_b32 s0, -1
+; GFX12DAGISEL-NEXT: s_clause 0x1 ; 16-byte Folded Spill
+; GFX12DAGISEL-NEXT: scratch_store_b64 off, v[4:5], s32
+; GFX12DAGISEL-NEXT: scratch_store_b64 off, v[6:7], s32 offset:8
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT: s_mov_b32 exec_lo, s0
+; GFX12DAGISEL-NEXT: s_or_saveexec_b32 s2, -1
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT: v_cndmask_b32_e64 v4, 0, v2, s2
+; GFX12DAGISEL-NEXT: v_cndmask_b32_e64 v5, 0x80000000, v3, s2
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX12DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:1 row_mask:0xf bank_mask:0xf
+; GFX12DAGISEL-NEXT: v_add_f64_e32 v[4:5], v[4:5], v[6:7]
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX12DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:2 row_mask:0xf bank_mask:0xf
+; GFX12DAGISEL-NEXT: v_add_f64_e32 v[4:5], v[4:5], v[6:7]
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX12DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:4 row_mask:0xf bank_mask:0xf
+; GFX12DAGISEL-NEXT: v_add_f64_e32 v[4:5], v[4:5], v[6:7]
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v7, v5
+; GFX12DAGISEL-NEXT: v_mov_b32_dpp v6, v6 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_mov_b32_dpp v7, v7 row_shr:8 row_mask:0xf bank_mask:0xf
+; GFX12DAGISEL-NEXT: v_add_f64_e32 v[4:5], v[4:5], v[6:7]
+; GFX12DAGISEL-NEXT: ds_swizzle_b32 v6, v4 offset:swizzle(BROADCAST,32,15)
+; GFX12DAGISEL-NEXT: ds_swizzle_b32 v7, v5 offset:swizzle(BROADCAST,32,15)
+; GFX12DAGISEL-NEXT: s_wait_dscnt 0x0
+; GFX12DAGISEL-NEXT: v_add_f64_e32 v[4:5], v[4:5], v[6:7]
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_add_f64_e64 v[4:5], 0x80000000, -v[4:5]
+; GFX12DAGISEL-NEXT: v_readlane_b32 s0, v4, 31
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12DAGISEL-NEXT: v_readlane_b32 s1, v5, 31
+; GFX12DAGISEL-NEXT: s_mov_b32 exec_lo, s2
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX12DAGISEL-NEXT: s_xor_saveexec_b32 s0, -1
+; GFX12DAGISEL-NEXT: s_clause 0x1 ; 16-byte Folded Reload
+; GFX12DAGISEL-NEXT: scratch_load_b64 v[4:5], off, s32
+; GFX12DAGISEL-NEXT: scratch_load_b64 v[6:7], off, s32 offset:8
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT: s_mov_b32 exec_lo, s0
+; GFX12DAGISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12DAGISEL-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %result = call double @llvm.amdgcn.wave.reduce.fsub(double %in, i32 2)
+ store double %result, ptr addrspace(1) %out
+ ret void
+}
+
define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in, float %in2) {
; GFX8DAGISEL-LABEL: divergent_cfg_float:
; GFX8DAGISEL: ; %bb.0: ; %entry
@@ -969,7 +1869,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX8DAGISEL-NEXT: ; implicit-def: $sgpr6
; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX8DAGISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX8DAGISEL-NEXT: ; %bb.1: ; %else
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -977,11 +1877,11 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8DAGISEL-NEXT: v_mul_f32_e64 v0, -s0, v0
; GFX8DAGISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX8DAGISEL-NEXT: .LBB3_2: ; %Flow
+; GFX8DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[2:3]
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s6
; GFX8DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
-; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX8DAGISEL-NEXT: ; %bb.3: ; %if
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -990,7 +1890,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX8DAGISEL-NEXT: v_mul_f32_e64 v0, -s1, v0
; GFX8DAGISEL-NEXT: v_readfirstlane_b32 s0, v0
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
-; GFX8DAGISEL-NEXT: .LBB3_4: ; %endif
+; GFX8DAGISEL-NEXT: .LBB4_4: ; %endif
; GFX8DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
@@ -1007,7 +1907,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX8GISEL-NEXT: ; implicit-def: $sgpr6
; GFX8GISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX8GISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX8GISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX8GISEL-NEXT: ; %bb.1: ; %else
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -1015,9 +1915,9 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8GISEL-NEXT: v_mul_f32_e64 v0, -s0, v0
; GFX8GISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX8GISEL-NEXT: .LBB3_2: ; %Flow
+; GFX8GISEL-NEXT: .LBB4_2: ; %Flow
; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[2:3]
-; GFX8GISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX8GISEL-NEXT: ; %bb.3: ; %if
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1025,7 +1925,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX8GISEL-NEXT: v_cvt_f32_i32_e32 v0, s0
; GFX8GISEL-NEXT: v_mul_f32_e64 v0, -s1, v0
; GFX8GISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX8GISEL-NEXT: .LBB3_4: ; %endif
+; GFX8GISEL-NEXT: .LBB4_4: ; %endif
; GFX8GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
@@ -1043,7 +1943,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX9DAGISEL-NEXT: ; implicit-def: $sgpr6
; GFX9DAGISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9DAGISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX9DAGISEL-NEXT: ; %bb.1: ; %else
; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -1051,11 +1951,11 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9DAGISEL-NEXT: v_mul_f32_e64 v0, -s0, v0
; GFX9DAGISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9DAGISEL-NEXT: .LBB3_2: ; %Flow
+; GFX9DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[2:3]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s6
; GFX9DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
-; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX9DAGISEL-NEXT: ; %bb.3: ; %if
; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1064,7 +1964,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX9DAGISEL-NEXT: v_mul_f32_e64 v0, -s1, v0
; GFX9DAGISEL-NEXT: v_readfirstlane_b32 s0, v0
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s0
-; GFX9DAGISEL-NEXT: .LBB3_4: ; %endif
+; GFX9DAGISEL-NEXT: .LBB4_4: ; %endif
; GFX9DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
@@ -1080,7 +1980,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX9GISEL-NEXT: ; implicit-def: $sgpr6
; GFX9GISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9GISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX9GISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX9GISEL-NEXT: ; %bb.1: ; %else
; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -1088,9 +1988,9 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9GISEL-NEXT: v_mul_f32_e64 v0, -s0, v0
; GFX9GISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9GISEL-NEXT: .LBB3_2: ; %Flow
+; GFX9GISEL-NEXT: .LBB4_2: ; %Flow
; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[2:3]
-; GFX9GISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX9GISEL-NEXT: ; %bb.3: ; %if
; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1098,7 +1998,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX9GISEL-NEXT: v_cvt_f32_i32_e32 v0, s0
; GFX9GISEL-NEXT: v_mul_f32_e64 v0, -s1, v0
; GFX9GISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9GISEL-NEXT: .LBB3_4: ; %endif
+; GFX9GISEL-NEXT: .LBB4_4: ; %endif
; GFX9GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
@@ -1115,7 +2015,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1064DAGISEL-NEXT: ; implicit-def: $sgpr6
; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064DAGISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1064DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -1123,11 +2023,11 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064DAGISEL-NEXT: v_mul_f32_e64 v0, -s0, v0
; GFX1064DAGISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX1064DAGISEL-NEXT: .LBB3_2: ; %Flow
+; GFX1064DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[2:3]
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s6
; GFX1064DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
-; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX1064DAGISEL-NEXT: ; %bb.3: ; %if
; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1136,7 +2036,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1064DAGISEL-NEXT: v_mul_f32_e64 v0, -s1, v0
; GFX1064DAGISEL-NEXT: v_readfirstlane_b32 s0, v0
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s0
-; GFX1064DAGISEL-NEXT: .LBB3_4: ; %endif
+; GFX1064DAGISEL-NEXT: .LBB4_4: ; %endif
; GFX1064DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
@@ -1152,7 +2052,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6
; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064GISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX1064GISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1064GISEL-NEXT: ; %bb.1: ; %else
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -1160,9 +2060,9 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064GISEL-NEXT: v_mul_f32_e64 v0, -s0, v0
; GFX1064GISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX1064GISEL-NEXT: .LBB3_2: ; %Flow
+; GFX1064GISEL-NEXT: .LBB4_2: ; %Flow
; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[2:3]
-; GFX1064GISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX1064GISEL-NEXT: ; %bb.3: ; %if
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1170,7 +2070,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1064GISEL-NEXT: v_cvt_f32_i32_e32 v0, s0
; GFX1064GISEL-NEXT: v_mul_f32_e64 v0, -s1, v0
; GFX1064GISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX1064GISEL-NEXT: .LBB3_4: ; %endif
+; GFX1064GISEL-NEXT: .LBB4_4: ; %endif
; GFX1064GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
@@ -1187,7 +2087,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1032DAGISEL-NEXT: ; implicit-def: $sgpr3
; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s2, vcc_lo
; GFX1032DAGISEL-NEXT: s_xor_b32 s2, exec_lo, s2
-; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1032DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1032DAGISEL-NEXT: s_mov_b32 s3, exec_lo
; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s3, s3
@@ -1195,12 +2095,12 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032DAGISEL-NEXT: v_mul_f32_e64 v0, -s0, v0
; GFX1032DAGISEL-NEXT: v_readfirstlane_b32 s3, v0
-; GFX1032DAGISEL-NEXT: .LBB3_2: ; %Flow
+; GFX1032DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s0, s2
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s3
; GFX1032DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX1032DAGISEL-NEXT: ; %bb.3: ; %if
; GFX1032DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
@@ -1208,7 +2108,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1032DAGISEL-NEXT: v_mul_f32_e64 v0, -s1, v0
; GFX1032DAGISEL-NEXT: v_readfirstlane_b32 s1, v0
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s1
-; GFX1032DAGISEL-NEXT: .LBB3_4: ; %endif
+; GFX1032DAGISEL-NEXT: .LBB4_4: ; %endif
; GFX1032DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, 0
@@ -1223,7 +2123,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1032GISEL-NEXT: ; implicit-def: $sgpr2
; GFX1032GISEL-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX1032GISEL-NEXT: s_xor_b32 s3, exec_lo, s3
-; GFX1032GISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1032GISEL-NEXT: ; %bb.1: ; %else
; GFX1032GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
@@ -1231,17 +2131,17 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032GISEL-NEXT: v_mul_f32_e64 v0, -s0, v0
; GFX1032GISEL-NEXT: v_readfirstlane_b32 s2, v0
-; GFX1032GISEL-NEXT: .LBB3_2: ; %Flow
+; GFX1032GISEL-NEXT: .LBB4_2: ; %Flow
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s0, s3
-; GFX1032GISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX1032GISEL-NEXT: ; %bb.3: ; %if
; GFX1032GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
; GFX1032GISEL-NEXT: v_cvt_f32_i32_e32 v0, s2
; GFX1032GISEL-NEXT: v_mul_f32_e64 v0, -s1, v0
; GFX1032GISEL-NEXT: v_readfirstlane_b32 s2, v0
-; GFX1032GISEL-NEXT: .LBB3_4: ; %endif
+; GFX1032GISEL-NEXT: .LBB4_4: ; %endif
; GFX1032GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1032GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s2
@@ -1259,7 +2159,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX1164DAGISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1164DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -1269,12 +2169,12 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164DAGISEL-NEXT: v_mul_f32_e64 v0, -s0, v0
; GFX1164DAGISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX1164DAGISEL-NEXT: .LBB3_2: ; %Flow
+; GFX1164DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[2:3]
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s6
; GFX1164DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
-; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX1164DAGISEL-NEXT: ; %bb.3: ; %if
; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1285,7 +2185,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164DAGISEL-NEXT: v_readfirstlane_b32 s0, v0
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s0
-; GFX1164DAGISEL-NEXT: .LBB3_4: ; %endif
+; GFX1164DAGISEL-NEXT: .LBB4_4: ; %endif
; GFX1164DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
@@ -1303,7 +2203,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
; GFX1164GISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX1164GISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1164GISEL-NEXT: ; %bb.1: ; %else
; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -1313,9 +2213,9 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164GISEL-NEXT: v_mul_f32_e64 v0, -s0, v0
; GFX1164GISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX1164GISEL-NEXT: .LBB3_2: ; %Flow
+; GFX1164GISEL-NEXT: .LBB4_2: ; %Flow
; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[2:3], s[2:3]
-; GFX1164GISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX1164GISEL-NEXT: ; %bb.3: ; %if
; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1325,7 +2225,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1164GISEL-NEXT: v_mul_f32_e64 v0, -s1, v0
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164GISEL-NEXT: v_readfirstlane_b32 s6, v0
-; GFX1164GISEL-NEXT: .LBB3_4: ; %endif
+; GFX1164GISEL-NEXT: .LBB4_4: ; %endif
; GFX1164GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
@@ -1345,7 +2245,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX1132DAGISEL-NEXT: s_xor_b32 s2, exec_lo, s2
-; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1132DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1132DAGISEL-NEXT: s_mov_b32 s3, exec_lo
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -1355,13 +2255,13 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_mul_f32_e64 v0, -s0, v0
; GFX1132DAGISEL-NEXT: v_readfirstlane_b32 s3, v0
-; GFX1132DAGISEL-NEXT: .LBB3_2: ; %Flow
+; GFX1132DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s0, s2
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, s3
; GFX1132DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX1132DAGISEL-NEXT: ; %bb.3: ; %if
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -1372,7 +2272,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1132DAGISEL-NEXT: v_readfirstlane_b32 s1, v0
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, s1
-; GFX1132DAGISEL-NEXT: .LBB3_4: ; %endif
+; GFX1132DAGISEL-NEXT: .LBB4_4: ; %endif
; GFX1132DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v1, 0
@@ -1389,7 +2289,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
; GFX1132GISEL-NEXT: s_xor_b32 s3, exec_lo, s3
-; GFX1132GISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1132GISEL-NEXT: ; %bb.1: ; %else
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -1399,10 +2299,10 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1132GISEL-NEXT: v_mul_f32_e64 v0, -s0, v0
; GFX1132GISEL-NEXT: v_readfirstlane_b32 s2, v0
-; GFX1132GISEL-NEXT: .LBB3_2: ; %Flow
+; GFX1132GISEL-NEXT: .LBB4_2: ; %Flow
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s0, s3
-; GFX1132GISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX1132GISEL-NEXT: ; %bb.3: ; %if
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -1411,7 +2311,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1132GISEL-NEXT: v_mul_f32_e64 v0, -s1, v0
; GFX1132GISEL-NEXT: v_readfirstlane_b32 s2, v0
-; GFX1132GISEL-NEXT: .LBB3_4: ; %endif
+; GFX1132GISEL-NEXT: .LBB4_4: ; %endif
; GFX1132GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -1429,7 +2329,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX12DAGISEL-NEXT: s_xor_b32 s2, exec_lo, s2
-; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB3_2
+; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX12DAGISEL-NEXT: ; %bb.1: ; %else
; GFX12DAGISEL-NEXT: s_mov_b32 s3, exec_lo
; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -1439,14 +2339,14 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12DAGISEL-NEXT: v_mul_f32_e64 v0, -s0, v0
; GFX12DAGISEL-NEXT: v_readfirstlane_b32 s3, v0
-; GFX12DAGISEL-NEXT: .LBB3_2: ; %Flow
+; GFX12DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
; GFX12DAGISEL-NEXT: s_or_saveexec_b32 s0, s2
; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12DAGISEL-NEXT: v_mov_b32_e32 v0, s3
; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GFX12DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB3_4
+; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB4_4
; GFX12DAGISEL-NEXT: ; %bb.3: ; %if
; GFX12DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
@@ -1459,7 +2359,7 @@ define amdgpu_kernel void @divergent_cfg_float(ptr addrspace(1) %out, float %in,
; GFX12DAGISEL-NEXT: s_wait_alu depctr_va_sdst(0)
; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12DAGISEL-NEXT: v_mov_b32_e32 v0, s1
-; GFX12DAGISEL-NEXT: .LBB3_4: ; %endif
+; GFX12DAGISEL-NEXT: .LBB4_4: ; %endif
; GFX12DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX12DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX12DAGISEL-NEXT: v_mov_b32_e32 v1, 0
@@ -1720,7 +2620,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], 0
; GFX8DAGISEL-NEXT: s_mov_b64 s[4:5], exec
-; GFX8DAGISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX8DAGISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s10, s[4:5]
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v4, s6
; GFX8DAGISEL-NEXT: v_readlane_b32 s8, v2, s10
@@ -1731,7 +2631,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX8DAGISEL-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX8DAGISEL-NEXT: v_readfirstlane_b32 s6, v4
; GFX8DAGISEL-NEXT: v_readfirstlane_b32 s7, v5
-; GFX8DAGISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX8DAGISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX8DAGISEL-NEXT: ; %bb.2:
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s6
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s7
@@ -1744,7 +2644,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], 0
; GFX8GISEL-NEXT: s_mov_b64 s[4:5], exec
-; GFX8GISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX8GISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX8GISEL-NEXT: s_ff1_i32_b64 s10, s[4:5]
; GFX8GISEL-NEXT: v_mov_b32_e32 v4, s6
; GFX8GISEL-NEXT: v_readlane_b32 s8, v2, s10
@@ -1755,7 +2655,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX8GISEL-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX8GISEL-NEXT: v_readfirstlane_b32 s6, v4
; GFX8GISEL-NEXT: v_readfirstlane_b32 s7, v5
-; GFX8GISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX8GISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX8GISEL-NEXT: ; %bb.2:
; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s6
; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s7
@@ -1768,7 +2668,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], 0
; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], exec
-; GFX9DAGISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX9DAGISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s10, s[4:5]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v4, s6
; GFX9DAGISEL-NEXT: v_readlane_b32 s8, v2, s10
@@ -1779,7 +2679,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX9DAGISEL-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX9DAGISEL-NEXT: v_readfirstlane_b32 s6, v4
; GFX9DAGISEL-NEXT: v_readfirstlane_b32 s7, v5
-; GFX9DAGISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX9DAGISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX9DAGISEL-NEXT: ; %bb.2:
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, s6
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v3, s7
@@ -1792,7 +2692,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9GISEL-NEXT: s_mov_b64 s[6:7], 0
; GFX9GISEL-NEXT: s_mov_b64 s[4:5], exec
-; GFX9GISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX9GISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX9GISEL-NEXT: s_ff1_i32_b64 s10, s[4:5]
; GFX9GISEL-NEXT: v_mov_b32_e32 v4, s6
; GFX9GISEL-NEXT: v_readlane_b32 s8, v2, s10
@@ -1803,7 +2703,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX9GISEL-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX9GISEL-NEXT: v_readfirstlane_b32 s6, v4
; GFX9GISEL-NEXT: v_readfirstlane_b32 s7, v5
-; GFX9GISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX9GISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX9GISEL-NEXT: ; %bb.2:
; GFX9GISEL-NEXT: v_mov_b32_e32 v2, s6
; GFX9GISEL-NEXT: v_mov_b32_e32 v3, s7
@@ -1816,7 +2716,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], 0
; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], exec
-; GFX1064DAGISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064DAGISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s10, s[4:5]
; GFX1064DAGISEL-NEXT: v_readlane_b32 s8, v2, s10
; GFX1064DAGISEL-NEXT: v_readlane_b32 s9, v3, s10
@@ -1825,7 +2725,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1064DAGISEL-NEXT: v_add_f64 v[4:5], -s[8:9], s[6:7]
; GFX1064DAGISEL-NEXT: v_readfirstlane_b32 s6, v4
; GFX1064DAGISEL-NEXT: v_readfirstlane_b32 s7, v5
-; GFX1064DAGISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX1064DAGISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1064DAGISEL-NEXT: ; %bb.2:
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, s6
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v3, s7
@@ -1837,7 +2737,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], 0
; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], exec
-; GFX1064GISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064GISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX1064GISEL-NEXT: s_ff1_i32_b64 s10, s[4:5]
; GFX1064GISEL-NEXT: v_readlane_b32 s8, v2, s10
; GFX1064GISEL-NEXT: v_readlane_b32 s9, v3, s10
@@ -1846,7 +2746,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1064GISEL-NEXT: v_add_f64 v[4:5], -s[8:9], s[6:7]
; GFX1064GISEL-NEXT: v_readfirstlane_b32 s6, v4
; GFX1064GISEL-NEXT: v_readfirstlane_b32 s7, v5
-; GFX1064GISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX1064GISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1064GISEL-NEXT: ; %bb.2:
; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, s6
; GFX1064GISEL-NEXT: v_mov_b32_e32 v3, s7
@@ -1858,7 +2758,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1032DAGISEL-NEXT: s_mov_b64 s[4:5], 0
; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
-; GFX1032DAGISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032DAGISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032DAGISEL-NEXT: v_readlane_b32 s8, v2, s7
; GFX1032DAGISEL-NEXT: v_readlane_b32 s9, v3, s7
@@ -1867,7 +2767,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1032DAGISEL-NEXT: v_add_f64 v[4:5], -s[8:9], s[4:5]
; GFX1032DAGISEL-NEXT: v_readfirstlane_b32 s4, v4
; GFX1032DAGISEL-NEXT: v_readfirstlane_b32 s5, v5
-; GFX1032DAGISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX1032DAGISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1032DAGISEL-NEXT: ; %bb.2:
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, s4
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v3, s5
@@ -1879,7 +2779,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1032GISEL-NEXT: s_mov_b64 s[4:5], 0
; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
-; GFX1032GISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032GISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032GISEL-NEXT: v_readlane_b32 s8, v2, s7
; GFX1032GISEL-NEXT: v_readlane_b32 s9, v3, s7
@@ -1888,7 +2788,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1032GISEL-NEXT: v_add_f64 v[4:5], -s[8:9], s[4:5]
; GFX1032GISEL-NEXT: v_readfirstlane_b32 s4, v4
; GFX1032GISEL-NEXT: v_readfirstlane_b32 s5, v5
-; GFX1032GISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX1032GISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1032GISEL-NEXT: ; %bb.2:
; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, s4
; GFX1032GISEL-NEXT: v_mov_b32_e32 v3, s5
@@ -1900,7 +2800,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], 0
; GFX1164DAGISEL-NEXT: s_mov_b64 s[0:1], exec
-; GFX1164DAGISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s6, s[0:1]
; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s6
@@ -1912,7 +2812,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1164DAGISEL-NEXT: v_readfirstlane_b32 s2, v4
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1164DAGISEL-NEXT: v_readfirstlane_b32 s3, v5
-; GFX1164DAGISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX1164DAGISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1164DAGISEL-NEXT: ; %bb.2:
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, s2
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v3, s3
@@ -1924,7 +2824,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], 0
; GFX1164GISEL-NEXT: s_mov_b64 s[0:1], exec
-; GFX1164GISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164GISEL-NEXT: s_ctz_i32_b64 s6, s[0:1]
; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s6
@@ -1936,7 +2836,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1164GISEL-NEXT: v_readfirstlane_b32 s2, v4
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1164GISEL-NEXT: v_readfirstlane_b32 s3, v5
-; GFX1164GISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX1164GISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1164GISEL-NEXT: ; %bb.2:
; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, s2
; GFX1164GISEL-NEXT: v_mov_b32_e32 v3, s3
@@ -1948,7 +2848,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1132DAGISEL-NEXT: s_mov_b64 s[0:1], 0
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
@@ -1960,7 +2860,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1132DAGISEL-NEXT: v_readfirstlane_b32 s0, v4
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1132DAGISEL-NEXT: v_readfirstlane_b32 s1, v5
-; GFX1132DAGISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX1132DAGISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1132DAGISEL-NEXT: ; %bb.2:
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX1132DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
@@ -1971,7 +2871,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1132GISEL-NEXT: s_mov_b64 s[0:1], 0
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
@@ -1983,7 +2883,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX1132GISEL-NEXT: v_readfirstlane_b32 s0, v4
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1132GISEL-NEXT: v_readfirstlane_b32 s1, v5
-; GFX1132GISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX1132GISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1132GISEL-NEXT: ; %bb.2:
; GFX1132GISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX1132GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
@@ -1998,7 +2898,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
; GFX12DAGISEL-NEXT: s_mov_b64 s[0:1], 0
; GFX12DAGISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX12DAGISEL-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX12DAGISEL-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GFX12DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
@@ -2011,7 +2911,7 @@ define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12DAGISEL-NEXT: v_readfirstlane_b32 s0, v4
; GFX12DAGISEL-NEXT: v_readfirstlane_b32 s1, v5
-; GFX12DAGISEL-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX12DAGISEL-NEXT: s_cbranch_scc1 .LBB6_1
; GFX12DAGISEL-NEXT: ; %bb.2:
; GFX12DAGISEL-NEXT: s_wait_alu depctr_va_sdst(0)
; GFX12DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
@@ -2032,7 +2932,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX8DAGISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
; GFX8DAGISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
-; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX8DAGISEL-NEXT: ; %bb.1: ; %else
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -2041,13 +2941,13 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX8DAGISEL-NEXT: v_mul_f64 v[0:1], -s[2:3], v[0:1]
; GFX8DAGISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX8DAGISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX8DAGISEL-NEXT: .LBB6_2: ; %Flow
+; GFX8DAGISEL-NEXT: .LBB7_2: ; %Flow
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[8:9]
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s6
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s7
; GFX8DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
-; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX8DAGISEL-NEXT: ; %bb.3: ; %if
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -2057,7 +2957,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX8DAGISEL-NEXT: v_readfirstlane_b32 s5, v1
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s5
-; GFX8DAGISEL-NEXT: .LBB6_4: ; %endif
+; GFX8DAGISEL-NEXT: .LBB7_4: ; %endif
; GFX8DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
@@ -2071,7 +2971,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX8GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
; GFX8GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
; GFX8GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
-; GFX8GISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX8GISEL-NEXT: ; %bb.1: ; %else
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -2080,10 +2980,10 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX8GISEL-NEXT: v_mul_f64 v[0:1], -s[2:3], v[0:1]
; GFX8GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX8GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX8GISEL-NEXT: .LBB6_2: ; %Flow
+; GFX8GISEL-NEXT: .LBB7_2: ; %Flow
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
-; GFX8GISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX8GISEL-NEXT: ; %bb.3: ; %if
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8GISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
@@ -2093,7 +2993,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX8GISEL-NEXT: v_mul_f64 v[0:1], -s[4:5], v[0:1]
; GFX8GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX8GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX8GISEL-NEXT: .LBB6_4: ; %endif
+; GFX8GISEL-NEXT: .LBB7_4: ; %endif
; GFX8GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s6
; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
@@ -2110,7 +3010,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX9DAGISEL-NEXT: ; implicit-def: $sgpr4_sgpr5
; GFX9DAGISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
; GFX9DAGISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
-; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX9DAGISEL-NEXT: ; %bb.1: ; %else
; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], exec
; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
@@ -2119,13 +3019,13 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX9DAGISEL-NEXT: v_mul_f64 v[0:1], -s[2:3], v[0:1]
; GFX9DAGISEL-NEXT: v_readfirstlane_b32 s4, v0
; GFX9DAGISEL-NEXT: v_readfirstlane_b32 s5, v1
-; GFX9DAGISEL-NEXT: .LBB6_2: ; %Flow
+; GFX9DAGISEL-NEXT: .LBB7_2: ; %Flow
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[8:9]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s5
; GFX9DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
-; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX9DAGISEL-NEXT: ; %bb.3: ; %if
; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], exec
; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
@@ -2135,7 +3035,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX9DAGISEL-NEXT: v_readfirstlane_b32 s5, v1
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s5
-; GFX9DAGISEL-NEXT: .LBB6_4: ; %endif
+; GFX9DAGISEL-NEXT: .LBB7_4: ; %endif
; GFX9DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -2148,7 +3048,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX9GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
; GFX9GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
; GFX9GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
-; GFX9GISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX9GISEL-NEXT: ; %bb.1: ; %else
; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -2157,10 +3057,10 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX9GISEL-NEXT: v_mul_f64 v[0:1], -s[2:3], v[0:1]
; GFX9GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX9GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9GISEL-NEXT: .LBB6_2: ; %Flow
+; GFX9GISEL-NEXT: .LBB7_2: ; %Flow
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
-; GFX9GISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX9GISEL-NEXT: ; %bb.3: ; %if
; GFX9GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9GISEL-NEXT: s_mov_b64 s[4:5], exec
@@ -2170,7 +3070,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX9GISEL-NEXT: v_mul_f64 v[0:1], -s[6:7], v[0:1]
; GFX9GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX9GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9GISEL-NEXT: .LBB6_4: ; %endif
+; GFX9GISEL-NEXT: .LBB7_4: ; %endif
; GFX9GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s6
; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s7
@@ -2187,7 +3087,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1064DAGISEL-NEXT: ; implicit-def: $sgpr8_sgpr9
; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX1064DAGISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX1064DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1064DAGISEL-NEXT: s_mov_b64 s[8:9], exec
; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s8, s[8:9]
@@ -2196,13 +3096,13 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1064DAGISEL-NEXT: v_mul_f64 v[0:1], -s[2:3], v[0:1]
; GFX1064DAGISEL-NEXT: v_readfirstlane_b32 s8, v0
; GFX1064DAGISEL-NEXT: v_readfirstlane_b32 s9, v1
-; GFX1064DAGISEL-NEXT: .LBB6_2: ; %Flow
+; GFX1064DAGISEL-NEXT: .LBB7_2: ; %Flow
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[4:5]
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s8
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s9
; GFX1064DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
-; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX1064DAGISEL-NEXT: ; %bb.3: ; %if
; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], exec
; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
@@ -2212,7 +3112,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1064DAGISEL-NEXT: v_readfirstlane_b32 s5, v1
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s5
-; GFX1064DAGISEL-NEXT: .LBB6_4: ; %endif
+; GFX1064DAGISEL-NEXT: .LBB7_4: ; %endif
; GFX1064DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -2225,7 +3125,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
; GFX1064GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
-; GFX1064GISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX1064GISEL-NEXT: ; %bb.1: ; %else
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -2234,10 +3134,10 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1064GISEL-NEXT: v_mul_f64 v[0:1], -s[2:3], v[0:1]
; GFX1064GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX1064GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX1064GISEL-NEXT: .LBB6_2: ; %Flow
+; GFX1064GISEL-NEXT: .LBB7_2: ; %Flow
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
-; GFX1064GISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX1064GISEL-NEXT: ; %bb.3: ; %if
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
@@ -2247,7 +3147,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1064GISEL-NEXT: v_mul_f64 v[0:1], -s[6:7], v[0:1]
; GFX1064GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX1064GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX1064GISEL-NEXT: .LBB6_4: ; %endif
+; GFX1064GISEL-NEXT: .LBB7_4: ; %endif
; GFX1064GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s6
; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s7
@@ -2264,7 +3164,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1032DAGISEL-NEXT: ; implicit-def: $sgpr4_sgpr5
; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s8, vcc_lo
; GFX1032DAGISEL-NEXT: s_xor_b32 s8, exec_lo, s8
-; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX1032DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1032DAGISEL-NEXT: s_mov_b32 s4, exec_lo
; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s4, s4
@@ -2273,13 +3173,13 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1032DAGISEL-NEXT: v_mul_f64 v[0:1], -s[2:3], v[0:1]
; GFX1032DAGISEL-NEXT: v_readfirstlane_b32 s4, v0
; GFX1032DAGISEL-NEXT: v_readfirstlane_b32 s5, v1
-; GFX1032DAGISEL-NEXT: .LBB6_2: ; %Flow
+; GFX1032DAGISEL-NEXT: .LBB7_2: ; %Flow
; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s2, s8
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s5
; GFX1032DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s2
-; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX1032DAGISEL-NEXT: ; %bb.3: ; %if
; GFX1032DAGISEL-NEXT: s_mov_b32 s3, exec_lo
; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s3, s3
@@ -2289,7 +3189,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1032DAGISEL-NEXT: v_readfirstlane_b32 s5, v1
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s5
-; GFX1032DAGISEL-NEXT: .LBB6_4: ; %endif
+; GFX1032DAGISEL-NEXT: .LBB7_4: ; %endif
; GFX1032DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -2302,7 +3202,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1032GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
; GFX1032GISEL-NEXT: s_and_saveexec_b32 s8, vcc_lo
; GFX1032GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
-; GFX1032GISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX1032GISEL-NEXT: ; %bb.1: ; %else
; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s6, s6
@@ -2311,10 +3211,10 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1032GISEL-NEXT: v_mul_f64 v[0:1], -s[2:3], v[0:1]
; GFX1032GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX1032GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX1032GISEL-NEXT: .LBB6_2: ; %Flow
+; GFX1032GISEL-NEXT: .LBB7_2: ; %Flow
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s2, s8
-; GFX1032GISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX1032GISEL-NEXT: ; %bb.3: ; %if
; GFX1032GISEL-NEXT: s_mov_b32 s3, exec_lo
; GFX1032GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
@@ -2324,7 +3224,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1032GISEL-NEXT: v_mul_f64 v[0:1], -s[6:7], v[0:1]
; GFX1032GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX1032GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX1032GISEL-NEXT: .LBB6_4: ; %endif
+; GFX1032GISEL-NEXT: .LBB7_4: ; %endif
; GFX1032GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s6
; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s7
@@ -2343,7 +3243,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX1164DAGISEL-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
-; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX1164DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1164DAGISEL-NEXT: s_mov_b64 s[8:9], exec
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -2355,14 +3255,14 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1164DAGISEL-NEXT: v_readfirstlane_b32 s8, v0
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1164DAGISEL-NEXT: v_readfirstlane_b32 s9, v1
-; GFX1164DAGISEL-NEXT: .LBB6_2: ; %Flow
+; GFX1164DAGISEL-NEXT: .LBB7_2: ; %Flow
; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[6:7]
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s8
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s9
; GFX1164DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
-; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX1164DAGISEL-NEXT: ; %bb.3: ; %if
; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -2376,7 +3276,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s5
-; GFX1164DAGISEL-NEXT: .LBB6_4: ; %endif
+; GFX1164DAGISEL-NEXT: .LBB7_4: ; %endif
; GFX1164DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -2391,7 +3291,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
; GFX1164GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
-; GFX1164GISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX1164GISEL-NEXT: ; %bb.1: ; %else
; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -2403,10 +3303,10 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1164GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1164GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX1164GISEL-NEXT: .LBB6_2: ; %Flow
+; GFX1164GISEL-NEXT: .LBB7_2: ; %Flow
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[2:3], s[8:9]
-; GFX1164GISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX1164GISEL-NEXT: ; %bb.3: ; %if
; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1164GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
@@ -2418,7 +3318,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1164GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX1164GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX1164GISEL-NEXT: .LBB6_4: ; %endif
+; GFX1164GISEL-NEXT: .LBB7_4: ; %endif
; GFX1164GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s6
@@ -2438,7 +3338,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX1132DAGISEL-NEXT: s_xor_b32 s8, exec_lo, s8
-; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX1132DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1132DAGISEL-NEXT: s_mov_b32 s6, exec_lo
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -2450,13 +3350,13 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1132DAGISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1132DAGISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX1132DAGISEL-NEXT: .LBB6_2: ; %Flow
+; GFX1132DAGISEL-NEXT: .LBB7_2: ; %Flow
; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s2, s8
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
; GFX1132DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s2
-; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX1132DAGISEL-NEXT: ; %bb.3: ; %if
; GFX1132DAGISEL-NEXT: s_mov_b32 s3, exec_lo
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -2468,7 +3368,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_readfirstlane_b32 s5, v1
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX1132DAGISEL-NEXT: .LBB6_4: ; %endif
+; GFX1132DAGISEL-NEXT: .LBB7_4: ; %endif
; GFX1132DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -2483,7 +3383,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
; GFX1132GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
-; GFX1132GISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX1132GISEL-NEXT: ; %bb.1: ; %else
; GFX1132GISEL-NEXT: s_mov_b32 s6, exec_lo
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -2495,10 +3395,10 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1132GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1132GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX1132GISEL-NEXT: .LBB6_2: ; %Flow
+; GFX1132GISEL-NEXT: .LBB7_2: ; %Flow
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s2, s8
-; GFX1132GISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX1132GISEL-NEXT: ; %bb.3: ; %if
; GFX1132GISEL-NEXT: s_mov_b32 s3, exec_lo
; GFX1132GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
@@ -2510,7 +3410,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1132GISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX1132GISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX1132GISEL-NEXT: .LBB6_4: ; %endif
+; GFX1132GISEL-NEXT: .LBB7_4: ; %endif
; GFX1132GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
@@ -2529,7 +3429,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX12DAGISEL-NEXT: s_xor_b32 s8, exec_lo, s8
-; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB6_2
+; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB7_2
; GFX12DAGISEL-NEXT: ; %bb.1: ; %else
; GFX12DAGISEL-NEXT: s_mov_b32 s6, exec_lo
; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -2541,7 +3441,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX12DAGISEL-NEXT: v_readfirstlane_b32 s6, v0
; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX12DAGISEL-NEXT: v_readfirstlane_b32 s7, v1
-; GFX12DAGISEL-NEXT: .LBB6_2: ; %Flow
+; GFX12DAGISEL-NEXT: .LBB7_2: ; %Flow
; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
; GFX12DAGISEL-NEXT: s_or_saveexec_b32 s2, s8
; GFX12DAGISEL-NEXT: s_wait_alu depctr_va_sdst(0)
@@ -2549,7 +3449,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX12DAGISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GFX12DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s2
-; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB6_4
+; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB7_4
; GFX12DAGISEL-NEXT: ; %bb.3: ; %if
; GFX12DAGISEL-NEXT: s_mov_b32 s3, exec_lo
; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
@@ -2563,7 +3463,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
; GFX12DAGISEL-NEXT: v_readfirstlane_b32 s5, v1
; GFX12DAGISEL-NEXT: s_wait_alu depctr_va_sdst(0)
; GFX12DAGISEL-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX12DAGISEL-NEXT: .LBB6_4: ; %endif
+; GFX12DAGISEL-NEXT: .LBB7_4: ; %endif
; GFX12DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX12DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX12DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
More information about the llvm-branch-commits
mailing list