[llvm-branch-commits] [llvm] [AMDGPU] Add wave reduce intrinsics for double types - 2 (PR #170812)

via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Tue Jan 27 22:56:24 PST 2026


https://github.com/easyonaadit updated https://github.com/llvm/llvm-project/pull/170812

From 712c9510432f218a99d25323b538a5d7c68fad89 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Thu, 4 Dec 2025 14:21:56 +0530
Subject: [PATCH 01/10] [AMDGPU] Add wave reduce intrinsics for double types -
 2

Supported Ops: `add`, `sub`
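
For reference, a minimal IR sketch of how the new f64 reductions are
invoked (the fadd call mirrors the tests below; the fsub intrinsic name
is assumed by analogy with fadd, and the trailing i32 is the strategy
operand the tests pass as 1):

  %sum  = call double @llvm.amdgcn.wave.reduce.fadd(double %x, i32 1)
  %diff = call double @llvm.amdgcn.wave.reduce.fsub(double %x, i32 1)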
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |   95 +-
 llvm/lib/Target/AMDGPU/SIInstructions.td      |    2 +
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.fadd.ll | 1115 +++++++++++++++++
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.fsub.ll | 1102 ++++++++++++++++
 4 files changed, 2295 insertions(+), 19 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index daee242e1873f..d99a70665cc37 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5525,6 +5525,10 @@ static uint64_t getIdentityValueFor64BitWaveReduction(unsigned Opc) {
     return std::numeric_limits<uint64_t>::min();
   case AMDGPU::S_AND_B64:
     return std::numeric_limits<uint64_t>::max();
+  case AMDGPU::V_ADD_F64_e64:
+    return 0x8000000000000000; // -0.0, the fadd identity.
+  case AMDGPU::V_SUB_F16_e64: // Stand-in for f64 fsub; no V_SUB_F64 exists.
+    return 0x0; // +0.0, since fsub is lowered to fadd of negated inputs.
   default:
     llvm_unreachable(
         "Unexpected opcode in getIdentityValueFor64BitWaveReduction");
@@ -5545,7 +5549,8 @@ static bool isFloatingPointWaveReduceOperation(unsigned Opc) {
   return Opc == AMDGPU::V_MIN_F32_e64 || Opc == AMDGPU::V_MAX_F32_e64 ||
          Opc == AMDGPU::V_ADD_F32_e64 || Opc == AMDGPU::V_SUB_F32_e64 ||
          Opc == AMDGPU::V_MIN_F64_e64 || Opc == AMDGPU::V_MAX_F64_e64 ||
-         Opc == AMDGPU::V_MIN_NUM_F64_e64 || Opc == AMDGPU::V_MAX_NUM_F64_e64;
+         Opc == AMDGPU::V_MIN_NUM_F64_e64 || Opc == AMDGPU::V_MAX_NUM_F64_e64 ||
+         Opc == AMDGPU::V_ADD_F64_e64 || Opc == AMDGPU::V_SUB_F16_e64;
 }
 
 static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
@@ -5597,9 +5602,11 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
     case AMDGPU::S_ADD_I32:
     case AMDGPU::S_ADD_U64_PSEUDO:
     case AMDGPU::V_ADD_F32_e64:
+    case AMDGPU::V_ADD_F64_e64:
     case AMDGPU::S_SUB_I32:
     case AMDGPU::S_SUB_U64_PSEUDO:
-    case AMDGPU::V_SUB_F32_e64: {
+    case AMDGPU::V_SUB_F32_e64:
+    case AMDGPU::V_SUB_F16_e64: {
       const TargetRegisterClass *WaveMaskRegClass = TRI->getWaveMaskRegClass();
       const TargetRegisterClass *DstRegClass = MRI.getRegClass(DstReg);
       Register ExecMask = MRI.createVirtualRegister(WaveMaskRegClass);
@@ -5755,28 +5762,70 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
         break;
       }
       case AMDGPU::V_ADD_F32_e64:
-      case AMDGPU::V_SUB_F32_e64: {
-        Register ActiveLanesVreg =
-            MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
-        Register DstVreg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+      case AMDGPU::V_ADD_F64_e64:
+      case AMDGPU::V_SUB_F32_e64:
+      case AMDGPU::V_SUB_F16_e64: {
+        bool is32BitOpc = is32bitWaveReduceOperation(Opc);
+        const TargetRegisterClass *VregRC =
+            is32BitOpc ? &AMDGPU::VGPR_32RegClass : TRI->getVGPR64Class();
+        Register ActiveLanesVreg = MRI.createVirtualRegister(VregRC);
+        Register DstVreg = MRI.createVirtualRegister(VregRC);
         // Get number of active lanes as a float val.
-        BuildMI(BB, MI, DL, TII->get(AMDGPU::V_CVT_F32_I32_e64),
+        BuildMI(BB, MI, DL,
+                TII->get(is32BitOpc ? AMDGPU::V_CVT_F32_I32_e64
+                                    : AMDGPU::V_CVT_F64_I32_e64),
                 ActiveLanesVreg)
             .addReg(NewAccumulator->getOperand(0).getReg())
             .addImm(0)  // clamp
             .addImm(0); // output-modifier
 
         // Take negation of input for SUB reduction
-        unsigned srcMod = Opc == AMDGPU::V_SUB_F32_e64 ? 1 : 0;
-        BuildMI(BB, MI, DL, TII->get(AMDGPU::V_MUL_F32_e64), DstVreg)
-            .addImm(srcMod) // src0 modifier
-            .addReg(SrcReg)
-            .addImm(0) // src1 modifier
-            .addReg(ActiveLanesVreg)
-            .addImm(0)  // clamp
-            .addImm(0); // output-mod
-        BuildMI(BB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
-            .addReg(DstVreg);
+        unsigned srcMod =
+            Opc == AMDGPU::V_SUB_F32_e64 || Opc == AMDGPU::V_SUB_F16_e64 ? 1
+                                                                         : 0;
+        unsigned MulOpc =
+            is32BitOpc ? AMDGPU::V_MUL_F32_e64
+            : ST.getGeneration() == AMDGPUSubtarget::Generation::GFX12
+                ? AMDGPU::V_MUL_F64_e64_gfx12
+                : AMDGPU::V_MUL_F64_e64;
+        auto DestVregInst = BuildMI(BB, MI, DL, TII->get(MulOpc),
+                                    DstVreg)
+                                .addImm(srcMod) // src0 modifier
+                                .addReg(SrcReg)
+                                .addImm(0) // src1 modifier
+                                .addReg(ActiveLanesVreg)
+                                .addImm(0)  // clamp
+                                .addImm(0); // output-mod
+        if (is32BitOpc) {
+          BuildMI(BB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
+              .addReg(DstVreg);
+        } else {
+          Register LaneValueLoReg =
+              MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
+          Register LaneValueHiReg =
+              MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
+          const TargetRegisterClass *VregSubRC =
+              TRI->getSubRegisterClass(VregRC, AMDGPU::sub0);
+          MachineOperand Op1L =
+              TII->buildExtractSubRegOrImm(MI, MRI, DestVregInst->getOperand(0),
+                                           VregRC, AMDGPU::sub0, VregSubRC);
+          MachineOperand Op1H =
+              TII->buildExtractSubRegOrImm(MI, MRI, DestVregInst->getOperand(0),
+                                           VregRC, AMDGPU::sub1, VregSubRC);
+          // Lane value input should be in an SGPR.
+          BuildMI(BB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
+                  LaneValueLoReg)
+              .add(Op1L);
+          BuildMI(BB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
+                  LaneValueHiReg)
+              .add(Op1H);
+          NewAccumulator =
+              BuildMI(BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), DstReg)
+                  .addReg(LaneValueLoReg)
+                  .addImm(AMDGPU::sub0)
+                  .addReg(LaneValueHiReg)
+                  .addImm(AMDGPU::sub1);
+        }
       }
       }
       RetBB = &BB;
@@ -5957,7 +6006,9 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
       case AMDGPU::V_MIN_F64_e64:
       case AMDGPU::V_MIN_NUM_F64_e64:
       case AMDGPU::V_MAX_F64_e64:
-      case AMDGPU::V_MAX_NUM_F64_e64: {
+      case AMDGPU::V_MAX_NUM_F64_e64:
+      case AMDGPU::V_ADD_F64_e64:
+      case AMDGPU::V_SUB_F16_e64: {
         int SrcIdx =
             AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src);
         const TargetRegisterClass *VregRC =
@@ -5973,8 +6024,10 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
         BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_MOV_B64_PSEUDO),
                 AccumulatorVReg)
             .addReg(Accumulator->getOperand(0).getReg());
+        unsigned Modifier = Opc == AMDGPU::V_SUB_F16_e64 ? 1 : 0; // Negate src0 for fsub.
+        Opc = Opc == AMDGPU::V_SUB_F16_e64 ? AMDGPU::V_ADD_F64_e64 : Opc; // fsub -> fadd of -src0.
         auto DstVregInst = BuildMI(*ComputeLoop, I, DL, TII->get(Opc), DstVreg)
-                               .addImm(SISrcMods::NONE) // src0 modifiers
+                               .addImm(Modifier) // src0 modifiers
                                .addReg(LaneValue->getOperand(0).getReg())
                                .addImm(SISrcMods::NONE) // src1 modifiers
                                .addReg(AccumulatorVReg)
@@ -6086,12 +6139,16 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_ADD_U64_PSEUDO);
   case AMDGPU::WAVE_REDUCE_FADD_PSEUDO_F32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_ADD_F32_e64);
+  case AMDGPU::WAVE_REDUCE_FADD_PSEUDO_F64:
+    return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_ADD_F64_e64);
   case AMDGPU::WAVE_REDUCE_SUB_PSEUDO_I32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_SUB_I32);
   case AMDGPU::WAVE_REDUCE_SUB_PSEUDO_U64:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_SUB_U64_PSEUDO);
   case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_SUB_F32_e64);
+  case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64:
+    return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_SUB_F16_e64); // f64 fsub stand-in.
   case AMDGPU::WAVE_REDUCE_AND_PSEUDO_B32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_AND_B32);
   case AMDGPU::WAVE_REDUCE_AND_PSEUDO_B64:
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index cba3f9b3ae923..ae5ce53d693cd 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -377,7 +377,9 @@ defvar Operations = [
   WaveReduceOp<"fmax", "F32", f32, SGPR_32, VSrc_b32>,
   WaveReduceOp<"fmax", "F64", f64, SGPR_64, VSrc_b64>,
   WaveReduceOp<"fadd", "F32", f32, SGPR_32, VSrc_b32>,
+  WaveReduceOp<"fadd", "F64", f64, SGPR_64, VSrc_b64>,
   WaveReduceOp<"fsub", "F32", f32, SGPR_32, VSrc_b32>,
+  WaveReduceOp<"fsub", "F64", f64, SGPR_64, VSrc_b64>,
 ];
 
 foreach Op = Operations in {
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fadd.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fadd.ll
index 5d408dc65d68b..50b97f099c9ca 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fadd.ll
@@ -1014,6 +1014,1121 @@ endif:
   store float %combine, ptr addrspace(1) %out
   ret void
 }
+
+define amdgpu_kernel void @uniform_value_double(ptr addrspace(1) %out, double %in) {
+; GFX8DAGISEL-LABEL: uniform_value_double:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX8DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX8DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s4
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX8DAGISEL-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8DAGISEL-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: uniform_value_double:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX8GISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX8GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s4
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX8GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX8GISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: uniform_value_double:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s4
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX9DAGISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX9DAGISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: uniform_value_double:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9GISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s4
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX9GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX9GISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX1064DAGISEL-LABEL: uniform_value_double:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[0:1], exec
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s0, s[0:1]
+; GFX1064DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s0
+; GFX1064DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX1064DAGISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1064DAGISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT:    s_endpgm
+;
+; GFX1064GISEL-LABEL: uniform_value_double:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_mov_b64 s[0:1], exec
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT:    s_bcnt1_i32_b64 s0, s[0:1]
+; GFX1064GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s0
+; GFX1064GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX1064GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1064GISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT:    s_endpgm
+;
+; GFX1032DAGISEL-LABEL: uniform_value_double:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s0, exec_lo
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT:    s_bcnt1_i32_b32 s0, s0
+; GFX1032DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s0
+; GFX1032DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX1032DAGISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1032DAGISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT:    s_endpgm
+;
+; GFX1032GISEL-LABEL: uniform_value_double:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_mov_b32 s0, exec_lo
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT:    s_bcnt1_i32_b32 s0, s0
+; GFX1032GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s0
+; GFX1032GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX1032GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1032GISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT:    s_endpgm
+;
+; GFX1164DAGISEL-LABEL: uniform_value_double:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], exec
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT:    s_bcnt1_i32_b64 s0, s[0:1]
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s0
+; GFX1164DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1164DAGISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT:    s_endpgm
+;
+; GFX1164GISEL-LABEL: uniform_value_double:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], exec
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT:    s_bcnt1_i32_b64 s0, s[0:1]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s0
+; GFX1164GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1164GISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT:    s_endpgm
+;
+; GFX1132DAGISEL-LABEL: uniform_value_double:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, exec_lo
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    s_bcnt1_i32_b32 s0, s0
+; GFX1132DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s0
+; GFX1132DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX1132DAGISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1132DAGISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT:    s_endpgm
+;
+; GFX1132GISEL-LABEL: uniform_value_double:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_mov_b32 s0, exec_lo
+; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT:    s_bcnt1_i32_b32 s0, s0
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s0
+; GFX1132GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1132GISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT:    s_endpgm
+;
+; GFX12DAGISEL-LABEL: uniform_value_double:
+; GFX12DAGISEL:       ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT:    s_mov_b32 s0, exec_lo
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT:    s_bcnt1_i32_b32 s0, s0
+; GFX12DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s0
+; GFX12DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12DAGISEL-NEXT:    s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT:    v_mul_f64_e64 v[0:1], s[2:3], v[0:1]
+; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX12DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_va_sdst(0)
+; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX12DAGISEL-NEXT:    s_endpgm
+entry:
+  %result = call double @llvm.amdgcn.wave.reduce.fadd(double %in, i32 1)
+  store double %result, ptr addrspace(1) %out
+  ret void
+}
+
+define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
+; GFX8DAGISEL-LABEL: divergent_value_double:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    s_mov_b32 s6, 0
+; GFX8DAGISEL-NEXT:    s_brev_b32 s7, 1
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX8DAGISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[4:5]
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v4, s6
+; GFX8DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v5, s7
+; GFX8DAGISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX8DAGISEL-NEXT:    v_add_f64 v[4:5], s[8:9], v[4:5]
+; GFX8DAGISEL-NEXT:    s_bitset0_b64 s[4:5], s10
+; GFX8DAGISEL-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX8DAGISEL-NEXT:    v_readfirstlane_b32 s6, v4
+; GFX8DAGISEL-NEXT:    v_readfirstlane_b32 s7, v5
+; GFX8DAGISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX8DAGISEL-NEXT:  ; %bb.2:
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s6
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s7
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX8DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8GISEL-LABEL: divergent_value_double:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_mov_b32 s6, 0
+; GFX8GISEL-NEXT:    s_brev_b32 s7, 1
+; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX8GISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX8GISEL-NEXT:    s_ff1_i32_b64 s10, s[4:5]
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v4, s6
+; GFX8GISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v5, s7
+; GFX8GISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX8GISEL-NEXT:    v_add_f64 v[4:5], s[8:9], v[4:5]
+; GFX8GISEL-NEXT:    s_bitset0_b64 s[4:5], s10
+; GFX8GISEL-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX8GISEL-NEXT:    v_readfirstlane_b32 s6, v4
+; GFX8GISEL-NEXT:    v_readfirstlane_b32 s7, v5
+; GFX8GISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX8GISEL-NEXT:  ; %bb.2:
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s6
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s7
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX8GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9DAGISEL-LABEL: divergent_value_double:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    s_mov_b32 s6, 0
+; GFX9DAGISEL-NEXT:    s_brev_b32 s7, 1
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[4:5]
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v4, s6
+; GFX9DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v5, s7
+; GFX9DAGISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX9DAGISEL-NEXT:    v_add_f64 v[4:5], s[8:9], v[4:5]
+; GFX9DAGISEL-NEXT:    s_bitset0_b64 s[4:5], s10
+; GFX9DAGISEL-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX9DAGISEL-NEXT:    v_readfirstlane_b32 s6, v4
+; GFX9DAGISEL-NEXT:    v_readfirstlane_b32 s7, v5
+; GFX9DAGISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX9DAGISEL-NEXT:  ; %bb.2:
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, s6
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v3, s7
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX9DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9GISEL-LABEL: divergent_value_double:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_mov_b32 s6, 0
+; GFX9GISEL-NEXT:    s_brev_b32 s7, 1
+; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9GISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX9GISEL-NEXT:    s_ff1_i32_b64 s10, s[4:5]
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v4, s6
+; GFX9GISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v5, s7
+; GFX9GISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX9GISEL-NEXT:    v_add_f64 v[4:5], s[8:9], v[4:5]
+; GFX9GISEL-NEXT:    s_bitset0_b64 s[4:5], s10
+; GFX9GISEL-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX9GISEL-NEXT:    v_readfirstlane_b32 s6, v4
+; GFX9GISEL-NEXT:    v_readfirstlane_b32 s7, v5
+; GFX9GISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX9GISEL-NEXT:  ; %bb.2:
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, s6
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v3, s7
+; GFX9GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX9GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1064DAGISEL-LABEL: divergent_value_double:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s6, 0
+; GFX1064DAGISEL-NEXT:    s_brev_b32 s7, 1
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX1064DAGISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[4:5]
+; GFX1064DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX1064DAGISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX1064DAGISEL-NEXT:    s_bitset0_b64 s[4:5], s10
+; GFX1064DAGISEL-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX1064DAGISEL-NEXT:    v_add_f64 v[4:5], s[8:9], s[6:7]
+; GFX1064DAGISEL-NEXT:    v_readfirstlane_b32 s6, v4
+; GFX1064DAGISEL-NEXT:    v_readfirstlane_b32 s7, v5
+; GFX1064DAGISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX1064DAGISEL-NEXT:  ; %bb.2:
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, s6
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v3, s7
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1064GISEL-LABEL: divergent_value_double:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_mov_b32 s6, 0
+; GFX1064GISEL-NEXT:    s_brev_b32 s7, 1
+; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX1064GISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s10, s[4:5]
+; GFX1064GISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX1064GISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX1064GISEL-NEXT:    s_bitset0_b64 s[4:5], s10
+; GFX1064GISEL-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX1064GISEL-NEXT:    v_add_f64 v[4:5], s[8:9], s[6:7]
+; GFX1064GISEL-NEXT:    v_readfirstlane_b32 s6, v4
+; GFX1064GISEL-NEXT:    v_readfirstlane_b32 s7, v5
+; GFX1064GISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX1064GISEL-NEXT:  ; %bb.2:
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, s6
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v3, s7
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1032DAGISEL-LABEL: divergent_value_double:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1032DAGISEL-NEXT:    s_brev_b32 s5, 1
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
+; GFX1032DAGISEL-NEXT:    v_readlane_b32 s8, v2, s7
+; GFX1032DAGISEL-NEXT:    v_readlane_b32 s9, v3, s7
+; GFX1032DAGISEL-NEXT:    s_bitset0_b32 s6, s7
+; GFX1032DAGISEL-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1032DAGISEL-NEXT:    v_add_f64 v[4:5], s[8:9], s[4:5]
+; GFX1032DAGISEL-NEXT:    v_readfirstlane_b32 s4, v4
+; GFX1032DAGISEL-NEXT:    v_readfirstlane_b32 s5, v5
+; GFX1032DAGISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX1032DAGISEL-NEXT:  ; %bb.2:
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1032GISEL-LABEL: divergent_value_double:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1032GISEL-NEXT:    s_brev_b32 s5, 1
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
+; GFX1032GISEL-NEXT:    v_readlane_b32 s8, v2, s7
+; GFX1032GISEL-NEXT:    v_readlane_b32 s9, v3, s7
+; GFX1032GISEL-NEXT:    s_bitset0_b32 s6, s7
+; GFX1032GISEL-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1032GISEL-NEXT:    v_add_f64 v[4:5], s[8:9], s[4:5]
+; GFX1032GISEL-NEXT:    v_readfirstlane_b32 s4, v4
+; GFX1032GISEL-NEXT:    v_readfirstlane_b32 s5, v5
+; GFX1032GISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX1032GISEL-NEXT:  ; %bb.2:
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1164DAGISEL-LABEL: divergent_value_double:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    s_mov_b32 s2, 0
+; GFX1164DAGISEL-NEXT:    s_brev_b32 s3, 1
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], exec
+; GFX1164DAGISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s6, s[0:1]
+; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s6
+; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s6
+; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[0:1], s6
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX1164DAGISEL-NEXT:    v_add_f64 v[4:5], s[4:5], s[2:3]
+; GFX1164DAGISEL-NEXT:    v_readfirstlane_b32 s2, v4
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT:    v_readfirstlane_b32 s3, v5
+; GFX1164DAGISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX1164DAGISEL-NEXT:  ; %bb.2:
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v3, s3
+; GFX1164DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1164DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1164GISEL-LABEL: divergent_value_double:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_mov_b32 s2, 0
+; GFX1164GISEL-NEXT:    s_brev_b32 s3, 1
+; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], exec
+; GFX1164GISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s6, s[0:1]
+; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s6
+; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s6
+; GFX1164GISEL-NEXT:    s_bitset0_b64 s[0:1], s6
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX1164GISEL-NEXT:    v_add_f64 v[4:5], s[4:5], s[2:3]
+; GFX1164GISEL-NEXT:    v_readfirstlane_b32 s2, v4
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1164GISEL-NEXT:    v_readfirstlane_b32 s3, v5
+; GFX1164GISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX1164GISEL-NEXT:  ; %bb.2:
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v3, s3
+; GFX1164GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1164GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1132DAGISEL-LABEL: divergent_value_double:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132DAGISEL-NEXT:    s_brev_b32 s1, 1
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
+; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
+; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX1132DAGISEL-NEXT:    v_add_f64 v[4:5], s[4:5], s[0:1]
+; GFX1132DAGISEL-NEXT:    v_readfirstlane_b32 s0, v4
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1132DAGISEL-NEXT:    v_readfirstlane_b32 s1, v5
+; GFX1132DAGISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX1132DAGISEL-NEXT:  ; %bb.2:
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1132DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1132GISEL-LABEL: divergent_value_double:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132GISEL-NEXT:    s_brev_b32 s1, 1
+; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
+; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
+; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX1132GISEL-NEXT:    v_add_f64 v[4:5], s[4:5], s[0:1]
+; GFX1132GISEL-NEXT:    v_readfirstlane_b32 s0, v4
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1132GISEL-NEXT:    v_readfirstlane_b32 s1, v5
+; GFX1132GISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX1132GISEL-NEXT:  ; %bb.2:
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1132GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12DAGISEL-LABEL: divergent_value_double:
+; GFX12DAGISEL:       ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12DAGISEL-NEXT:    s_wait_expcnt 0x0
+; GFX12DAGISEL-NEXT:    s_wait_samplecnt 0x0
+; GFX12DAGISEL-NEXT:    s_wait_bvhcnt 0x0
+; GFX12DAGISEL-NEXT:    s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX12DAGISEL-NEXT:    s_brev_b32 s1, 1
+; GFX12DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX12DAGISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
+; GFX12DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
+; GFX12DAGISEL-NEXT:    s_bitset0_b32 s2, s3
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX12DAGISEL-NEXT:    v_add_f64_e64 v[4:5], s[4:5], s[0:1]
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s0, v4
+; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s1, v5
+; GFX12DAGISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX12DAGISEL-NEXT:  ; %bb.2:
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_va_sdst(0)
+; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX12DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %result = call double @llvm.amdgcn.wave.reduce.fadd(double %id.x, i32 1)
+  store double %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %in, double %in2) {
+; GFX8DAGISEL-LABEL: divergent_cfg_double:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX8DAGISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX8DAGISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX8DAGISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8DAGISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX8DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX8DAGISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX8DAGISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX8DAGISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    s_or_saveexec_b64 s[2:3], s[8:9]
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX8DAGISEL-NEXT:    s_xor_b64 exec, exec, s[2:3]
+; GFX8DAGISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX8DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX8DAGISEL-NEXT:    v_mul_f64 v[0:1], s[4:5], v[0:1]
+; GFX8DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX8DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX8DAGISEL-NEXT:  .LBB5_4: ; %endif
+; GFX8DAGISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: divergent_cfg_double:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT:    v_cmp_le_u32_e32 vcc, 16, v0
+; GFX8GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX8GISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX8GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8GISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX8GISEL-NEXT:  ; %bb.1: ; %else
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX8GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX8GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX8GISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX8GISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX8GISEL-NEXT:  ; %bb.3: ; %if
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    v_mul_f64 v[0:1], s[4:5], v[0:1]
+; GFX8GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX8GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX8GISEL-NEXT:  .LBB5_4: ; %endif
+; GFX8GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: divergent_cfg_double:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX9DAGISEL-NEXT:    ; implicit-def: $sgpr4_sgpr5
+; GFX9DAGISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX9DAGISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9DAGISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX9DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s4
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX9DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX9DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX9DAGISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    s_or_saveexec_b64 s[2:3], s[8:9]
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9DAGISEL-NEXT:    s_xor_b64 exec, exec, s[2:3]
+; GFX9DAGISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX9DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s4
+; GFX9DAGISEL-NEXT:    v_mul_f64 v[0:1], s[6:7], v[0:1]
+; GFX9DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX9DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9DAGISEL-NEXT:  .LBB5_4: ; %endif
+; GFX9DAGISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: divergent_cfg_double:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT:    v_cmp_le_u32_e32 vcc, 16, v0
+; GFX9GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX9GISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX9GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9GISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX9GISEL-NEXT:  ; %bb.1: ; %else
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX9GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX9GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX9GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX9GISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX9GISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX9GISEL-NEXT:  ; %bb.3: ; %if
+; GFX9GISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9GISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s4
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    v_mul_f64 v[0:1], s[6:7], v[0:1]
+; GFX9GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX9GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX9GISEL-NEXT:  .LBB5_4: ; %endif
+; GFX9GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX1064DAGISEL-LABEL: divergent_cfg_double:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_clause 0x1
+; GFX1064DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX1064DAGISEL-NEXT:    ; implicit-def: $sgpr8_sgpr9
+; GFX1064DAGISEL-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX1064DAGISEL-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
+; GFX1064DAGISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX1064DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[8:9], exec
+; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s8, s[8:9]
+; GFX1064DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s8
+; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX1064DAGISEL-NEXT:    v_readfirstlane_b32 s8, v0
+; GFX1064DAGISEL-NEXT:    v_readfirstlane_b32 s9, v1
+; GFX1064DAGISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    s_or_saveexec_b64 s[2:3], s[4:5]
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s8
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s9
+; GFX1064DAGISEL-NEXT:    s_xor_b64 exec, exec, s[2:3]
+; GFX1064DAGISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX1064DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s4
+; GFX1064DAGISEL-NEXT:    v_mul_f64 v[0:1], s[6:7], v[0:1]
+; GFX1064DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX1064DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1064DAGISEL-NEXT:  .LBB5_4: ; %endif
+; GFX1064DAGISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT:    s_endpgm
+;
+; GFX1064GISEL-LABEL: divergent_cfg_double:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT:    v_cmp_le_u32_e32 vcc, 16, v0
+; GFX1064GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1064GISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX1064GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1064GISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX1064GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1064GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX1064GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1064GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1064GISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX1064GISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX1064GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1064GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX1064GISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    v_mul_f64 v[0:1], s[6:7], v[0:1]
+; GFX1064GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1064GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1064GISEL-NEXT:  .LBB5_4: ; %endif
+; GFX1064GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT:    s_endpgm
+;
+; GFX1032DAGISEL-LABEL: divergent_cfg_double:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_clause 0x1
+; GFX1032DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc_lo, 15, v0
+; GFX1032DAGISEL-NEXT:    ; implicit-def: $sgpr4_sgpr5
+; GFX1032DAGISEL-NEXT:    s_and_saveexec_b32 s8, vcc_lo
+; GFX1032DAGISEL-NEXT:    s_xor_b32 s8, exec_lo, s8
+; GFX1032DAGISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX1032DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, exec_lo
+; GFX1032DAGISEL-NEXT:    s_bcnt1_i32_b32 s4, s4
+; GFX1032DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s4
+; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX1032DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX1032DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX1032DAGISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    s_or_saveexec_b32 s2, s8
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1032DAGISEL-NEXT:    s_xor_b32 exec_lo, exec_lo, s2
+; GFX1032DAGISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX1032DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s3, exec_lo
+; GFX1032DAGISEL-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX1032DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s3
+; GFX1032DAGISEL-NEXT:    v_mul_f64 v[0:1], s[6:7], v[0:1]
+; GFX1032DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX1032DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1032DAGISEL-NEXT:  .LBB5_4: ; %endif
+; GFX1032DAGISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT:    s_endpgm
+;
+; GFX1032GISEL-LABEL: divergent_cfg_double:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT:    v_cmp_le_u32_e32 vcc_lo, 16, v0
+; GFX1032GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1032GISEL-NEXT:    s_and_saveexec_b32 s8, vcc_lo
+; GFX1032GISEL-NEXT:    s_xor_b32 s8, exec_lo, s8
+; GFX1032GISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX1032GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT:    s_bcnt1_i32_b32 s6, s6
+; GFX1032GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX1032GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1032GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1032GISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_andn2_saveexec_b32 s2, s8
+; GFX1032GISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX1032GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1032GISEL-NEXT:    s_mov_b32 s3, exec_lo
+; GFX1032GISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032GISEL-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX1032GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s3
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    v_mul_f64 v[0:1], s[6:7], v[0:1]
+; GFX1032GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1032GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1032GISEL-NEXT:  .LBB5_4: ; %endif
+; GFX1032GISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT:    s_endpgm
+;
+; GFX1164DAGISEL-LABEL: divergent_cfg_double:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_clause 0x1
+; GFX1164DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164DAGISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT:    ; implicit-def: $sgpr8_sgpr9
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT:    v_cmpx_lt_u32_e32 15, v0
+; GFX1164DAGISEL-NEXT:    s_xor_b64 s[6:7], exec, s[6:7]
+; GFX1164DAGISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX1164DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[8:9], exec
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    s_bcnt1_i32_b64 s8, s[8:9]
+; GFX1164DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s8
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX1164DAGISEL-NEXT:    v_readfirstlane_b32 s8, v0
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT:    v_readfirstlane_b32 s9, v1
+; GFX1164DAGISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    s_or_saveexec_b64 s[2:3], s[6:7]
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s8
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s9
+; GFX1164DAGISEL-NEXT:    s_xor_b64 exec, exec, s[2:3]
+; GFX1164DAGISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX1164DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT:    v_mul_f64 v[0:1], s[4:5], v[0:1]
+; GFX1164DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1164DAGISEL-NEXT:  .LBB5_4: ; %endif
+; GFX1164DAGISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT:    s_endpgm
+;
+; GFX1164GISEL-LABEL: divergent_cfg_double:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164GISEL-NEXT:    s_mov_b64 s[8:9], exec
+; GFX1164GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT:    v_cmpx_le_u32_e32 16, v0
+; GFX1164GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1164GISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX1164GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1164GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX1164GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1164GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1164GISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_and_not1_saveexec_b64 s[2:3], s[8:9]
+; GFX1164GISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX1164GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1164GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1164GISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    v_mul_f64 v[0:1], s[4:5], v[0:1]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1164GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1164GISEL-NEXT:  .LBB5_4: ; %endif
+; GFX1164GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT:    s_endpgm
+;
+; GFX1132DAGISEL-LABEL: divergent_cfg_double:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_clause 0x1
+; GFX1132DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132DAGISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s8, exec_lo
+; GFX1132DAGISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT:    v_cmpx_lt_u32_e32 15, v0
+; GFX1132DAGISEL-NEXT:    s_xor_b32 s8, exec_lo, s8
+; GFX1132DAGISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX1132DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    s_bcnt1_i32_b32 s6, s6
+; GFX1132DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX1132DAGISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1132DAGISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1132DAGISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    s_or_saveexec_b32 s2, s8
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132DAGISEL-NEXT:    s_xor_b32 exec_lo, exec_lo, s2
+; GFX1132DAGISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX1132DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s3, exec_lo
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX1132DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s3
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT:    v_mul_f64 v[0:1], s[4:5], v[0:1]
+; GFX1132DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX1132DAGISEL-NEXT:  .LBB5_4: ; %endif
+; GFX1132DAGISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT:    s_endpgm
+;
+; GFX1132GISEL-LABEL: divergent_cfg_double:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132GISEL-NEXT:    s_mov_b32 s8, exec_lo
+; GFX1132GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT:    v_cmpx_le_u32_e32 16, v0
+; GFX1132GISEL-NEXT:    s_xor_b32 s8, exec_lo, s8
+; GFX1132GISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX1132GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1132GISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    s_bcnt1_i32_b32 s6, s6
+; GFX1132GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
+; GFX1132GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1132GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1132GISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_and_not1_saveexec_b32 s2, s8
+; GFX1132GISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX1132GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1132GISEL-NEXT:    s_mov_b32 s3, exec_lo
+; GFX1132GISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132GISEL-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s3
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    v_mul_f64 v[0:1], s[4:5], v[0:1]
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1132GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1132GISEL-NEXT:  .LBB5_4: ; %endif
+; GFX1132GISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT:    s_endpgm
+;
+; GFX12DAGISEL-LABEL: divergent_cfg_double:
+; GFX12DAGISEL:       ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT:    s_clause 0x1
+; GFX12DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12DAGISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX12DAGISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX12DAGISEL-NEXT:    s_mov_b32 s8, exec_lo
+; GFX12DAGISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12DAGISEL-NEXT:    v_cmpx_lt_u32_e32 15, v0
+; GFX12DAGISEL-NEXT:    s_xor_b32 s8, exec_lo, s8
+; GFX12DAGISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX12DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX12DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT:    s_bcnt1_i32_b32 s6, s6
+; GFX12DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX12DAGISEL-NEXT:    s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT:    v_mul_f64_e64 v[0:1], s[2:3], v[0:1]
+; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX12DAGISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX12DAGISEL-NEXT:    s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT:    s_or_saveexec_b32 s2, s8
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_va_sdst(0)
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT:    s_xor_b32 exec_lo, exec_lo, s2
+; GFX12DAGISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX12DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX12DAGISEL-NEXT:    s_mov_b32 s3, exec_lo
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s3
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT:    v_mul_f64_e64 v[0:1], s[4:5], v[0:1]
+; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_va_sdst(0)
+; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX12DAGISEL-NEXT:  .LBB5_4: ; %endif
+; GFX12DAGISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX12DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX12DAGISEL-NEXT:    s_endpgm
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %d_cmp = icmp ult i32 %tid, 16
+  br i1 %d_cmp, label %if, label %else
+
+if:
+  %reducedValTid = call double @llvm.amdgcn.wave.reduce.fadd(double %in2, i32 1)
+  br label %endif
+
+else:
+  %reducedValIn = call double @llvm.amdgcn.wave.reduce.fadd(double %in, i32 1)
+  br label %endif
+
+endif:
+  %combine = phi double [%reducedValTid, %if], [%reducedValIn, %else]
+  store double %combine, ptr addrspace(1) %out
+  ret void
+}
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; GFX10DAGISEL: {{.*}}
 ; GFX10GISEL: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fsub.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fsub.ll
index 29dfb0b504f81..14a9f9650b0ae 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fsub.ll
@@ -1014,6 +1014,1108 @@ endif:
   store float %combine, ptr addrspace(1) %out
   ret void
 }
+
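+; With a uniform input, the fsub reduction folds to a single multiply:
+; the active lane count (s_bcnt1 of exec) is converted to f64 and
+; multiplied by the negated input, since fsub is lowered as fadd of -x.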
+define amdgpu_kernel void @uniform_value_double(ptr addrspace(1) %out, double %in) {
+; GFX8DAGISEL-LABEL: uniform_value_double:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX8DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX8DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s4
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX8DAGISEL-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8DAGISEL-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: uniform_value_double:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX8GISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX8GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s4
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX8GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX8GISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: uniform_value_double:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s4
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX9DAGISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX9DAGISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: uniform_value_double:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9GISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s4
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX9GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX9GISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX1064DAGISEL-LABEL: uniform_value_double:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[0:1], exec
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s0, s[0:1]
+; GFX1064DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s0
+; GFX1064DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX1064DAGISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1064DAGISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT:    s_endpgm
+;
+; GFX1064GISEL-LABEL: uniform_value_double:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_mov_b64 s[0:1], exec
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT:    s_bcnt1_i32_b64 s0, s[0:1]
+; GFX1064GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s0
+; GFX1064GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX1064GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1064GISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT:    s_endpgm
+;
+; GFX1032DAGISEL-LABEL: uniform_value_double:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s0, exec_lo
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT:    s_bcnt1_i32_b32 s0, s0
+; GFX1032DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s0
+; GFX1032DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX1032DAGISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1032DAGISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT:    s_endpgm
+;
+; GFX1032GISEL-LABEL: uniform_value_double:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_mov_b32 s0, exec_lo
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT:    s_bcnt1_i32_b32 s0, s0
+; GFX1032GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s0
+; GFX1032GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX1032GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1032GISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT:    s_endpgm
+;
+; GFX1164DAGISEL-LABEL: uniform_value_double:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], exec
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT:    s_bcnt1_i32_b64 s0, s[0:1]
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s0
+; GFX1164DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1164DAGISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT:    s_endpgm
+;
+; GFX1164GISEL-LABEL: uniform_value_double:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], exec
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT:    s_bcnt1_i32_b64 s0, s[0:1]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s0
+; GFX1164GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1164GISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT:    s_endpgm
+;
+; GFX1132DAGISEL-LABEL: uniform_value_double:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, exec_lo
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    s_bcnt1_i32_b32 s0, s0
+; GFX1132DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s0
+; GFX1132DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX1132DAGISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1132DAGISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT:    s_endpgm
+;
+; GFX1132GISEL-LABEL: uniform_value_double:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_mov_b32 s0, exec_lo
+; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT:    s_bcnt1_i32_b32 s0, s0
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s0
+; GFX1132GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1132GISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT:    s_endpgm
+;
+; GFX12DAGISEL-LABEL: uniform_value_double:
+; GFX12DAGISEL:       ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT:    s_mov_b32 s0, exec_lo
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT:    s_bcnt1_i32_b32 s0, s0
+; GFX12DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s0
+; GFX12DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12DAGISEL-NEXT:    s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT:    v_mul_f64_e64 v[0:1], -s[2:3], v[0:1]
+; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX12DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_va_sdst(0)
+; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX12DAGISEL-NEXT:    s_endpgm
+entry:
+  %result = call double @llvm.amdgcn.wave.reduce.fsub(double %in, i32 1)
+  store double %result, ptr addrspace(1) %out
+  ret void
+}
+
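+; With a divergent input, the lowering loops over the active lanes:
+; s_ff1/s_ctz selects the next set bit of a copy of exec, v_readlane
+; fetches that lane's 64-bit value, and v_add_f64 with a negated source
+; modifier accumulates the difference, starting from the +0.0 identity.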
+define void @divergent_value_double(ptr addrspace(1) %out, double %id.x) {
+; GFX8DAGISEL-LABEL: divergent_value_double:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], 0
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX8DAGISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[4:5]
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v4, s6
+; GFX8DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v5, s7
+; GFX8DAGISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX8DAGISEL-NEXT:    v_add_f64 v[4:5], -s[8:9], v[4:5]
+; GFX8DAGISEL-NEXT:    s_bitset0_b64 s[4:5], s10
+; GFX8DAGISEL-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX8DAGISEL-NEXT:    v_readfirstlane_b32 s6, v4
+; GFX8DAGISEL-NEXT:    v_readfirstlane_b32 s7, v5
+; GFX8DAGISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX8DAGISEL-NEXT:  ; %bb.2:
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s6
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s7
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX8DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8GISEL-LABEL: divergent_value_double:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], 0
+; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX8GISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX8GISEL-NEXT:    s_ff1_i32_b64 s10, s[4:5]
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v4, s6
+; GFX8GISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v5, s7
+; GFX8GISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX8GISEL-NEXT:    v_add_f64 v[4:5], -s[8:9], v[4:5]
+; GFX8GISEL-NEXT:    s_bitset0_b64 s[4:5], s10
+; GFX8GISEL-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX8GISEL-NEXT:    v_readfirstlane_b32 s6, v4
+; GFX8GISEL-NEXT:    v_readfirstlane_b32 s7, v5
+; GFX8GISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX8GISEL-NEXT:  ; %bb.2:
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s6
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s7
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX8GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9DAGISEL-LABEL: divergent_value_double:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], 0
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[4:5]
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v4, s6
+; GFX9DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v5, s7
+; GFX9DAGISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX9DAGISEL-NEXT:    v_add_f64 v[4:5], -s[8:9], v[4:5]
+; GFX9DAGISEL-NEXT:    s_bitset0_b64 s[4:5], s10
+; GFX9DAGISEL-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX9DAGISEL-NEXT:    v_readfirstlane_b32 s6, v4
+; GFX9DAGISEL-NEXT:    v_readfirstlane_b32 s7, v5
+; GFX9DAGISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX9DAGISEL-NEXT:  ; %bb.2:
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, s6
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v3, s7
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX9DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9GISEL-LABEL: divergent_value_double:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], 0
+; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9GISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX9GISEL-NEXT:    s_ff1_i32_b64 s10, s[4:5]
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v4, s6
+; GFX9GISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v5, s7
+; GFX9GISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX9GISEL-NEXT:    v_add_f64 v[4:5], -s[8:9], v[4:5]
+; GFX9GISEL-NEXT:    s_bitset0_b64 s[4:5], s10
+; GFX9GISEL-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX9GISEL-NEXT:    v_readfirstlane_b32 s6, v4
+; GFX9GISEL-NEXT:    v_readfirstlane_b32 s7, v5
+; GFX9GISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX9GISEL-NEXT:  ; %bb.2:
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, s6
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v3, s7
+; GFX9GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX9GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1064DAGISEL-LABEL: divergent_value_double:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], 0
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX1064DAGISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[4:5]
+; GFX1064DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX1064DAGISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX1064DAGISEL-NEXT:    s_bitset0_b64 s[4:5], s10
+; GFX1064DAGISEL-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX1064DAGISEL-NEXT:    v_add_f64 v[4:5], -s[8:9], s[6:7]
+; GFX1064DAGISEL-NEXT:    v_readfirstlane_b32 s6, v4
+; GFX1064DAGISEL-NEXT:    v_readfirstlane_b32 s7, v5
+; GFX1064DAGISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX1064DAGISEL-NEXT:  ; %bb.2:
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, s6
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v3, s7
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1064GISEL-LABEL: divergent_value_double:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], 0
+; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX1064GISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s10, s[4:5]
+; GFX1064GISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX1064GISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX1064GISEL-NEXT:    s_bitset0_b64 s[4:5], s10
+; GFX1064GISEL-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX1064GISEL-NEXT:    v_add_f64 v[4:5], -s[8:9], s[6:7]
+; GFX1064GISEL-NEXT:    v_readfirstlane_b32 s6, v4
+; GFX1064GISEL-NEXT:    v_readfirstlane_b32 s7, v5
+; GFX1064GISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX1064GISEL-NEXT:  ; %bb.2:
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, s6
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v3, s7
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1032DAGISEL-LABEL: divergent_value_double:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
+; GFX1032DAGISEL-NEXT:    v_readlane_b32 s8, v2, s7
+; GFX1032DAGISEL-NEXT:    v_readlane_b32 s9, v3, s7
+; GFX1032DAGISEL-NEXT:    s_bitset0_b32 s6, s7
+; GFX1032DAGISEL-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1032DAGISEL-NEXT:    v_add_f64 v[4:5], -s[8:9], s[4:5]
+; GFX1032DAGISEL-NEXT:    v_readfirstlane_b32 s4, v4
+; GFX1032DAGISEL-NEXT:    v_readfirstlane_b32 s5, v5
+; GFX1032DAGISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX1032DAGISEL-NEXT:  ; %bb.2:
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1032GISEL-LABEL: divergent_value_double:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
+; GFX1032GISEL-NEXT:    v_readlane_b32 s8, v2, s7
+; GFX1032GISEL-NEXT:    v_readlane_b32 s9, v3, s7
+; GFX1032GISEL-NEXT:    s_bitset0_b32 s6, s7
+; GFX1032GISEL-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1032GISEL-NEXT:    v_add_f64 v[4:5], -s[8:9], s[4:5]
+; GFX1032GISEL-NEXT:    v_readfirstlane_b32 s4, v4
+; GFX1032GISEL-NEXT:    v_readfirstlane_b32 s5, v5
+; GFX1032GISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX1032GISEL-NEXT:  ; %bb.2:
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1164DAGISEL-LABEL: divergent_value_double:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], 0
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], exec
+; GFX1164DAGISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s6, s[0:1]
+; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s6
+; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s6
+; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[0:1], s6
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX1164DAGISEL-NEXT:    v_add_f64 v[4:5], -s[4:5], s[2:3]
+; GFX1164DAGISEL-NEXT:    v_readfirstlane_b32 s2, v4
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT:    v_readfirstlane_b32 s3, v5
+; GFX1164DAGISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX1164DAGISEL-NEXT:  ; %bb.2:
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v3, s3
+; GFX1164DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1164DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1164GISEL-LABEL: divergent_value_double:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], 0
+; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], exec
+; GFX1164GISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s6, s[0:1]
+; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s6
+; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s6
+; GFX1164GISEL-NEXT:    s_bitset0_b64 s[0:1], s6
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX1164GISEL-NEXT:    v_add_f64 v[4:5], -s[4:5], s[2:3]
+; GFX1164GISEL-NEXT:    v_readfirstlane_b32 s2, v4
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1164GISEL-NEXT:    v_readfirstlane_b32 s3, v5
+; GFX1164GISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX1164GISEL-NEXT:  ; %bb.2:
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v3, s3
+; GFX1164GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1164GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1132DAGISEL-LABEL: divergent_value_double:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
+; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
+; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX1132DAGISEL-NEXT:    v_add_f64 v[4:5], -s[4:5], s[0:1]
+; GFX1132DAGISEL-NEXT:    v_readfirstlane_b32 s0, v4
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1132DAGISEL-NEXT:    v_readfirstlane_b32 s1, v5
+; GFX1132DAGISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX1132DAGISEL-NEXT:  ; %bb.2:
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1132DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1132GISEL-LABEL: divergent_value_double:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], 0
+; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
+; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
+; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX1132GISEL-NEXT:    v_add_f64 v[4:5], -s[4:5], s[0:1]
+; GFX1132GISEL-NEXT:    v_readfirstlane_b32 s0, v4
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1132GISEL-NEXT:    v_readfirstlane_b32 s1, v5
+; GFX1132GISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX1132GISEL-NEXT:  ; %bb.2:
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1132GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12DAGISEL-LABEL: divergent_value_double:
+; GFX12DAGISEL:       ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12DAGISEL-NEXT:    s_wait_expcnt 0x0
+; GFX12DAGISEL-NEXT:    s_wait_samplecnt 0x0
+; GFX12DAGISEL-NEXT:    s_wait_bvhcnt 0x0
+; GFX12DAGISEL-NEXT:    s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
+; GFX12DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX12DAGISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
+; GFX12DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
+; GFX12DAGISEL-NEXT:    s_bitset0_b32 s2, s3
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX12DAGISEL-NEXT:    v_add_f64_e64 v[4:5], -s[4:5], s[0:1]
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s0, v4
+; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s1, v5
+; GFX12DAGISEL-NEXT:    s_cbranch_scc1 .LBB4_1
+; GFX12DAGISEL-NEXT:  ; %bb.2:
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_va_sdst(0)
+; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX12DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %result = call double @llvm.amdgcn.wave.reduce.fsub(double %id.x, i32 1)
+  store double %result, ptr addrspace(1) %out
+  ret void
+}
+
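+; Exercises the reduction under divergent control flow: each arm of the
+; branch reduces a different operand, so the active lane count taken in
+; each arm reflects only the lanes executing that arm.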
+define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %in, double %in2) {
+; GFX8DAGISEL-LABEL: divergent_cfg_double:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX8DAGISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX8DAGISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX8DAGISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8DAGISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX8DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX8DAGISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX8DAGISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX8DAGISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    s_or_saveexec_b64 s[2:3], s[8:9]
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX8DAGISEL-NEXT:    s_xor_b64 exec, exec, s[2:3]
+; GFX8DAGISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX8DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX8DAGISEL-NEXT:    v_mul_f64 v[0:1], -s[4:5], v[0:1]
+; GFX8DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX8DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX8DAGISEL-NEXT:  .LBB5_4: ; %endif
+; GFX8DAGISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: divergent_cfg_double:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT:    v_cmp_le_u32_e32 vcc, 16, v0
+; GFX8GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX8GISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX8GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8GISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX8GISEL-NEXT:  ; %bb.1: ; %else
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX8GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX8GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX8GISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX8GISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX8GISEL-NEXT:  ; %bb.3: ; %if
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    v_mul_f64 v[0:1], -s[4:5], v[0:1]
+; GFX8GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX8GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX8GISEL-NEXT:  .LBB5_4: ; %endif
+; GFX8GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: divergent_cfg_double:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX9DAGISEL-NEXT:    ; implicit-def: $sgpr4_sgpr5
+; GFX9DAGISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX9DAGISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9DAGISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX9DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s4
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX9DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX9DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX9DAGISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    s_or_saveexec_b64 s[2:3], s[8:9]
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9DAGISEL-NEXT:    s_xor_b64 exec, exec, s[2:3]
+; GFX9DAGISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX9DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s4
+; GFX9DAGISEL-NEXT:    v_mul_f64 v[0:1], -s[6:7], v[0:1]
+; GFX9DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX9DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9DAGISEL-NEXT:  .LBB5_4: ; %endif
+; GFX9DAGISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: divergent_cfg_double:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT:    v_cmp_le_u32_e32 vcc, 16, v0
+; GFX9GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX9GISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX9GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9GISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX9GISEL-NEXT:  ; %bb.1: ; %else
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX9GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX9GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX9GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX9GISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX9GISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX9GISEL-NEXT:  ; %bb.3: ; %if
+; GFX9GISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9GISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s4
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    v_mul_f64 v[0:1], -s[6:7], v[0:1]
+; GFX9GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX9GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX9GISEL-NEXT:  .LBB5_4: ; %endif
+; GFX9GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX1064DAGISEL-LABEL: divergent_cfg_double:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_clause 0x1
+; GFX1064DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX1064DAGISEL-NEXT:    ; implicit-def: $sgpr8_sgpr9
+; GFX1064DAGISEL-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX1064DAGISEL-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
+; GFX1064DAGISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX1064DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[8:9], exec
+; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s8, s[8:9]
+; GFX1064DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s8
+; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX1064DAGISEL-NEXT:    v_readfirstlane_b32 s8, v0
+; GFX1064DAGISEL-NEXT:    v_readfirstlane_b32 s9, v1
+; GFX1064DAGISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    s_or_saveexec_b64 s[2:3], s[4:5]
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s8
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s9
+; GFX1064DAGISEL-NEXT:    s_xor_b64 exec, exec, s[2:3]
+; GFX1064DAGISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX1064DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s4
+; GFX1064DAGISEL-NEXT:    v_mul_f64 v[0:1], -s[6:7], v[0:1]
+; GFX1064DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX1064DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1064DAGISEL-NEXT:  .LBB5_4: ; %endif
+; GFX1064DAGISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT:    s_endpgm
+;
+; GFX1064GISEL-LABEL: divergent_cfg_double:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT:    v_cmp_le_u32_e32 vcc, 16, v0
+; GFX1064GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1064GISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX1064GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1064GISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX1064GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1064GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX1064GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1064GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1064GISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX1064GISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX1064GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1064GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX1064GISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    v_mul_f64 v[0:1], -s[6:7], v[0:1]
+; GFX1064GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1064GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1064GISEL-NEXT:  .LBB5_4: ; %endif
+; GFX1064GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT:    s_endpgm
+;
+; GFX1032DAGISEL-LABEL: divergent_cfg_double:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_clause 0x1
+; GFX1032DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc_lo, 15, v0
+; GFX1032DAGISEL-NEXT:    ; implicit-def: $sgpr4_sgpr5
+; GFX1032DAGISEL-NEXT:    s_and_saveexec_b32 s8, vcc_lo
+; GFX1032DAGISEL-NEXT:    s_xor_b32 s8, exec_lo, s8
+; GFX1032DAGISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX1032DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, exec_lo
+; GFX1032DAGISEL-NEXT:    s_bcnt1_i32_b32 s4, s4
+; GFX1032DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s4
+; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX1032DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX1032DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX1032DAGISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    s_or_saveexec_b32 s2, s8
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1032DAGISEL-NEXT:    s_xor_b32 exec_lo, exec_lo, s2
+; GFX1032DAGISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX1032DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s3, exec_lo
+; GFX1032DAGISEL-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX1032DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s3
+; GFX1032DAGISEL-NEXT:    v_mul_f64 v[0:1], -s[6:7], v[0:1]
+; GFX1032DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX1032DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1032DAGISEL-NEXT:  .LBB5_4: ; %endif
+; GFX1032DAGISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT:    s_endpgm
+;
+; GFX1032GISEL-LABEL: divergent_cfg_double:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT:    v_cmp_le_u32_e32 vcc_lo, 16, v0
+; GFX1032GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1032GISEL-NEXT:    s_and_saveexec_b32 s8, vcc_lo
+; GFX1032GISEL-NEXT:    s_xor_b32 s8, exec_lo, s8
+; GFX1032GISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX1032GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT:    s_bcnt1_i32_b32 s6, s6
+; GFX1032GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX1032GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1032GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1032GISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_andn2_saveexec_b32 s2, s8
+; GFX1032GISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX1032GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1032GISEL-NEXT:    s_mov_b32 s3, exec_lo
+; GFX1032GISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032GISEL-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX1032GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s3
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    v_mul_f64 v[0:1], -s[6:7], v[0:1]
+; GFX1032GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1032GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1032GISEL-NEXT:  .LBB5_4: ; %endif
+; GFX1032GISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT:    s_endpgm
+;
+; GFX1164DAGISEL-LABEL: divergent_cfg_double:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_clause 0x1
+; GFX1164DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164DAGISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT:    ; implicit-def: $sgpr8_sgpr9
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT:    v_cmpx_lt_u32_e32 15, v0
+; GFX1164DAGISEL-NEXT:    s_xor_b64 s[6:7], exec, s[6:7]
+; GFX1164DAGISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX1164DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[8:9], exec
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    s_bcnt1_i32_b64 s8, s[8:9]
+; GFX1164DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s8
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX1164DAGISEL-NEXT:    v_readfirstlane_b32 s8, v0
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT:    v_readfirstlane_b32 s9, v1
+; GFX1164DAGISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    s_or_saveexec_b64 s[2:3], s[6:7]
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s8
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s9
+; GFX1164DAGISEL-NEXT:    s_xor_b64 exec, exec, s[2:3]
+; GFX1164DAGISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX1164DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT:    v_mul_f64 v[0:1], -s[4:5], v[0:1]
+; GFX1164DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1164DAGISEL-NEXT:  .LBB5_4: ; %endif
+; GFX1164DAGISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT:    s_endpgm
+;
+; GFX1164GISEL-LABEL: divergent_cfg_double:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164GISEL-NEXT:    s_mov_b64 s[8:9], exec
+; GFX1164GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT:    v_cmpx_le_u32_e32 16, v0
+; GFX1164GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1164GISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX1164GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1164GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX1164GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1164GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1164GISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_and_not1_saveexec_b64 s[2:3], s[8:9]
+; GFX1164GISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX1164GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1164GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1164GISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    v_mul_f64 v[0:1], -s[4:5], v[0:1]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1164GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1164GISEL-NEXT:  .LBB5_4: ; %endif
+; GFX1164GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT:    s_endpgm
+;
+; GFX1132DAGISEL-LABEL: divergent_cfg_double:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_clause 0x1
+; GFX1132DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132DAGISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s8, exec_lo
+; GFX1132DAGISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT:    v_cmpx_lt_u32_e32 15, v0
+; GFX1132DAGISEL-NEXT:    s_xor_b32 s8, exec_lo, s8
+; GFX1132DAGISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX1132DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    s_bcnt1_i32_b32 s6, s6
+; GFX1132DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX1132DAGISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1132DAGISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1132DAGISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    s_or_saveexec_b32 s2, s8
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132DAGISEL-NEXT:    s_xor_b32 exec_lo, exec_lo, s2
+; GFX1132DAGISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX1132DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s3, exec_lo
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX1132DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s3
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT:    v_mul_f64 v[0:1], -s[4:5], v[0:1]
+; GFX1132DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX1132DAGISEL-NEXT:  .LBB5_4: ; %endif
+; GFX1132DAGISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT:    s_endpgm
+;
+; GFX1132GISEL-LABEL: divergent_cfg_double:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132GISEL-NEXT:    s_mov_b32 s8, exec_lo
+; GFX1132GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT:    v_cmpx_le_u32_e32 16, v0
+; GFX1132GISEL-NEXT:    s_xor_b32 s8, exec_lo, s8
+; GFX1132GISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX1132GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1132GISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    s_bcnt1_i32_b32 s6, s6
+; GFX1132GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT:    v_mul_f64 v[0:1], -s[2:3], v[0:1]
+; GFX1132GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1132GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1132GISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_and_not1_saveexec_b32 s2, s8
+; GFX1132GISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX1132GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1132GISEL-NEXT:    s_mov_b32 s3, exec_lo
+; GFX1132GISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132GISEL-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s3
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    v_mul_f64 v[0:1], -s[4:5], v[0:1]
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1132GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1132GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1132GISEL-NEXT:  .LBB5_4: ; %endif
+; GFX1132GISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT:    s_endpgm
+;
+; GFX12DAGISEL-LABEL: divergent_cfg_double:
+; GFX12DAGISEL:       ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT:    s_clause 0x1
+; GFX12DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12DAGISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX12DAGISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX12DAGISEL-NEXT:    s_mov_b32 s8, exec_lo
+; GFX12DAGISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12DAGISEL-NEXT:    v_cmpx_lt_u32_e32 15, v0
+; GFX12DAGISEL-NEXT:    s_xor_b32 s8, exec_lo, s8
+; GFX12DAGISEL-NEXT:    s_cbranch_execz .LBB5_2
+; GFX12DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX12DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT:    s_bcnt1_i32_b32 s6, s6
+; GFX12DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
+; GFX12DAGISEL-NEXT:    s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT:    v_mul_f64_e64 v[0:1], -s[2:3], v[0:1]
+; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX12DAGISEL-NEXT:  .LBB5_2: ; %Flow
+; GFX12DAGISEL-NEXT:    s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT:    s_or_saveexec_b32 s2, s8
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_va_sdst(0)
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT:    s_xor_b32 exec_lo, exec_lo, s2
+; GFX12DAGISEL-NEXT:    s_cbranch_execz .LBB5_4
+; GFX12DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX12DAGISEL-NEXT:    s_mov_b32 s3, exec_lo
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s3
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT:    v_mul_f64_e64 v[0:1], -s[4:5], v[0:1]
+; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX12DAGISEL-NEXT:    s_wait_alu depctr_va_sdst(0)
+; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX12DAGISEL-NEXT:  .LBB5_4: ; %endif
+; GFX12DAGISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX12DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX12DAGISEL-NEXT:    s_endpgm
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %d_cmp = icmp ult i32 %tid, 16
+  br i1 %d_cmp, label %if, label %else
+
+if:
+  %reducedValTid = call double @llvm.amdgcn.wave.reduce.fsub(double %in2, i32 1)
+  br label %endif
+
+else:
+  %reducedValIn = call double @llvm.amdgcn.wave.reduce.fsub(double %in, i32 1)
+  br label %endif
+
+endif:
+  %combine = phi double [%reducedValTid, %if], [%reducedValIn, %else]
+  store double %combine, ptr addrspace(1) %out
+  ret void
+}
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; GFX10DAGISEL: {{.*}}
 ; GFX10GISEL: {{.*}}

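A note on the codegen strategy visible in the checks above: when the reduction input is wave-uniform, the lowering does not loop over lanes; it counts the active lanes in EXEC (s_bcnt1), converts the count to f64 (v_cvt_f64_i32), and multiplies by the input, with the NEG source modifier applied for the fsub flavor. A minimal host-side sketch of that math, purely illustrative (the function and parameter names are not from the patch):

  // Scalar model of the uniform-value fast path for the f64 fsub reduction.
  #include <bit>
  #include <cstdint>

  double waveReduceFSubUniform(uint64_t execMask, double input) {
    // s_bcnt1_i32_b64 (or _b32 on wave32): count of active lanes in EXEC.
    int activeLanes = std::popcount(execMask);
    // v_cvt_f64_i32 followed by v_mul_f64 with NEG on src0: the fsub
    // reduction of a uniform value x over N lanes is -(N * x).
    return -(static_cast<double>(activeLanes) * input);
  }
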
>From 0ab9687669892eadd4ccf28d84308720a5292d67 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Fri, 9 Jan 2026 15:04:53 +0530
Subject: [PATCH 02/10] Use enum values for src modifiers

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index d99a70665cc37..0fb696110098d 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5781,13 +5781,12 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
 
         // Take negation of input for SUB reduction
         unsigned srcMod =
-            Opc == AMDGPU::V_SUB_F32_e64 || Opc == AMDGPU::V_SUB_F16_e64 ? 1
-                                                                         : 0;
-        unsigned MulOpc =
-            is32BitOpc ? AMDGPU::V_MUL_F32_e64
-            : ST.getGeneration() == AMDGPUSubtarget::Generation::GFX12
-                ? AMDGPU::V_MUL_F64_e64_gfx12
-                : AMDGPU::V_MUL_F64_e64;
+            Opc == AMDGPU::V_SUB_F32_e64 || Opc == AMDGPU::V_SUB_F16_e64
+                ? SISrcMods::NEG
+                : SISrcMods::NONE;
+        unsigned MulOpc = is32BitOpc    ? AMDGPU::V_MUL_F32_e64
+                          : IsGFX12Plus ? AMDGPU::V_MUL_F64_e64_gfx12
+                                        : AMDGPU::V_MUL_F64_e64;
         auto DestVregInst = BuildMI(BB, MI, DL, TII->get(MulOpc),
                                     DstVreg)
                                 .addImm(srcMod) // src0 modifier
@@ -6024,7 +6023,8 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
         BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_MOV_B64_PSEUDO),
                 AccumulatorVReg)
             .addReg(Accumulator->getOperand(0).getReg());
-        unsigned Modifier = Opc == AMDGPU::V_SUB_F16_e64 ? 1 : 0;
+        unsigned Modifier =
+            Opc == AMDGPU::V_SUB_F16_e64 ? SISrcMods::NEG : SISrcMods::NONE;
         Opc = Opc == AMDGPU::V_SUB_F16_e64 ? AMDGPU::V_ADD_F64_e64 : Opc;
         auto DstVregInst = BuildMI(*ComputeLoop, I, DL, TII->get(Opc), DstVreg)
                                .addImm(Modifier) // src0 modifiers

>From 8045f9678ad4180b86a82563a70e0b80b48dd9c5 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Fri, 16 Jan 2026 12:23:51 +0530
Subject: [PATCH 03/10] Use pseudo opcode for switch statements

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 0fb696110098d..937f12202fce8 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5527,7 +5527,7 @@ static uint64_t getIdentityValueFor64BitWaveReduction(unsigned Opc) {
     return std::numeric_limits<uint64_t>::max();
   case AMDGPU::V_ADD_F64_e64:
     return 0x8000000000000000; // -0.0
-  case AMDGPU::V_SUB_F16_e64:
+  case AMDGPU::V_SUB_F64_PSEUDO:
     return 0x0; // +0.0
   default:
     llvm_unreachable(
@@ -5550,7 +5550,7 @@ static bool isFloatingPointWaveReduceOperation(unsigned Opc) {
          Opc == AMDGPU::V_ADD_F32_e64 || Opc == AMDGPU::V_SUB_F32_e64 ||
          Opc == AMDGPU::V_MIN_F64_e64 || Opc == AMDGPU::V_MAX_F64_e64 ||
          Opc == AMDGPU::V_MIN_NUM_F64_e64 || Opc == AMDGPU::V_MAX_NUM_F64_e64
-         Opc == AMDGPU::V_ADD_F64_e64 || Opc == AMDGPU::V_SUB_F16_e64;
+         Opc == AMDGPU::V_ADD_F64_e64 || Opc == AMDGPU::V_SUB_F64_PSEUDO;
 }
 
 static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
@@ -5606,7 +5606,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
     case AMDGPU::S_SUB_I32:
     case AMDGPU::S_SUB_U64_PSEUDO:
     case AMDGPU::V_SUB_F32_e64:
-    case AMDGPU::V_SUB_F16_e64: {
+    case AMDGPU::V_SUB_F64_PSEUDO: {
       const TargetRegisterClass *WaveMaskRegClass = TRI->getWaveMaskRegClass();
       const TargetRegisterClass *DstRegClass = MRI.getRegClass(DstReg);
       Register ExecMask = MRI.createVirtualRegister(WaveMaskRegClass);
@@ -5764,7 +5764,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
       case AMDGPU::V_ADD_F32_e64:
       case AMDGPU::V_ADD_F64_e64:
       case AMDGPU::V_SUB_F32_e64:
-      case AMDGPU::V_SUB_F16_e64: {
+      case AMDGPU::V_SUB_F64_PSEUDO: {
         bool is32BitOpc = is32bitWaveReduceOperation(Opc);
         const TargetRegisterClass *VregRC =
             is32BitOpc ? &AMDGPU::VGPR_32RegClass : TRI->getVGPR64Class();
@@ -5781,7 +5781,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
 
         // Take negation of input for SUB reduction
         unsigned srcMod =
-            Opc == AMDGPU::V_SUB_F32_e64 || Opc == AMDGPU::V_SUB_F16_e64
+            Opc == AMDGPU::V_SUB_F32_e64 || Opc == AMDGPU::V_SUB_F64_PSEUDO
                 ? SISrcMods::NEG
                 : SISrcMods::NONE;
         unsigned MulOpc = is32BitOpc    ? AMDGPU::V_MUL_F32_e64
@@ -6007,7 +6007,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
       case AMDGPU::V_MAX_F64_e64:
       case AMDGPU::V_MAX_NUM_F64_e64:
       case AMDGPU::V_ADD_F64_e64:
-      case AMDGPU::V_SUB_F16_e64: {
+      case AMDGPU::V_SUB_F64_PSEUDO: {
         int SrcIdx =
             AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src);
         const TargetRegisterClass *VregRC =
@@ -6024,8 +6024,8 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
                 AccumulatorVReg)
             .addReg(Accumulator->getOperand(0).getReg());
         unsigned Modifier =
-            Opc == AMDGPU::V_SUB_F16_e64 ? SISrcMods::NEG : SISrcMods::NONE;
-        Opc = Opc == AMDGPU::V_SUB_F16_e64 ? AMDGPU::V_ADD_F64_e64 : Opc;
+            Opc == AMDGPU::V_SUB_F64_PSEUDO ? SISrcMods::NEG : SISrcMods::NONE;
+        Opc = Opc == AMDGPU::V_SUB_F64_PSEUDO ? AMDGPU::V_ADD_F64_e64 : Opc;
         auto DstVregInst = BuildMI(*ComputeLoop, I, DL, TII->get(Opc), DstVreg)
                                .addImm(Modifier) // src0 modifiers
                                .addReg(LaneValue->getOperand(0).getReg())
@@ -6148,7 +6148,7 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_SUB_F32_e64);
   case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64:
-    return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_SUB_F16_e64);
+    return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_SUB_F64_PSEUDO);
   case AMDGPU::WAVE_REDUCE_AND_PSEUDO_B32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_AND_B32);
   case AMDGPU::WAVE_REDUCE_AND_PSEUDO_B64:

>From 55317af084a73ee013930a75ccf525efa1498435 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Fri, 16 Jan 2026 18:06:46 +0530
Subject: [PATCH 04/10] Use `e32` encoding as placeholder

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 937f12202fce8..ec6d29739b8ec 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5527,7 +5527,7 @@ static uint64_t getIdentityValueFor64BitWaveReduction(unsigned Opc) {
     return std::numeric_limits<uint64_t>::max();
   case AMDGPU::V_ADD_F64_e64:
     return 0x8000000000000000; // -0.0
-  case AMDGPU::V_SUB_F64_PSEUDO:
+  case AMDGPU::V_SUB_F32_e32:
     return 0x0; // +0.0
   default:
     llvm_unreachable(
@@ -5550,7 +5550,7 @@ static bool isFloatingPointWaveReduceOperation(unsigned Opc) {
          Opc == AMDGPU::V_ADD_F32_e64 || Opc == AMDGPU::V_SUB_F32_e64 ||
          Opc == AMDGPU::V_MIN_F64_e64 || Opc == AMDGPU::V_MAX_F64_e64 ||
          Opc == AMDGPU::V_MIN_NUM_F64_e64 || Opc == AMDGPU::V_MAX_NUM_F64_e64
-         Opc == AMDGPU::V_ADD_F64_e64 || Opc == AMDGPU::V_SUB_F64_PSEUDO;
+         Opc == AMDGPU::V_ADD_F64_e64 || Opc == AMDGPU::V_SUB_F32_e32;
 }
 
 static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
@@ -5606,7 +5606,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
     case AMDGPU::S_SUB_I32:
     case AMDGPU::S_SUB_U64_PSEUDO:
     case AMDGPU::V_SUB_F32_e64:
-    case AMDGPU::V_SUB_F64_PSEUDO: {
+    case AMDGPU::V_SUB_F32_e32: {
       const TargetRegisterClass *WaveMaskRegClass = TRI->getWaveMaskRegClass();
       const TargetRegisterClass *DstRegClass = MRI.getRegClass(DstReg);
       Register ExecMask = MRI.createVirtualRegister(WaveMaskRegClass);
@@ -5764,7 +5764,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
       case AMDGPU::V_ADD_F32_e64:
       case AMDGPU::V_ADD_F64_e64:
       case AMDGPU::V_SUB_F32_e64:
-      case AMDGPU::V_SUB_F64_PSEUDO: {
+      case AMDGPU::V_SUB_F32_e32: {
         bool is32BitOpc = is32bitWaveReduceOperation(Opc);
         const TargetRegisterClass *VregRC =
             is32BitOpc ? &AMDGPU::VGPR_32RegClass : TRI->getVGPR64Class();
@@ -5781,7 +5781,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
 
         // Take negation of input for SUB reduction
         unsigned srcMod =
-            Opc == AMDGPU::V_SUB_F32_e64 || Opc == AMDGPU::V_SUB_F64_PSEUDO
+            Opc == AMDGPU::V_SUB_F32_e64 || Opc == AMDGPU::V_SUB_F32_e32
                 ? SISrcMods::NEG
                 : SISrcMods::NONE;
         unsigned MulOpc = is32BitOpc    ? AMDGPU::V_MUL_F32_e64
@@ -6007,7 +6007,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
       case AMDGPU::V_MAX_F64_e64:
       case AMDGPU::V_MAX_NUM_F64_e64:
       case AMDGPU::V_ADD_F64_e64:
-      case AMDGPU::V_SUB_F64_PSEUDO: {
+      case AMDGPU::V_SUB_F32_e32: {
         int SrcIdx =
             AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src);
         const TargetRegisterClass *VregRC =
@@ -6024,8 +6024,8 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
                 AccumulatorVReg)
             .addReg(Accumulator->getOperand(0).getReg());
         unsigned Modifier =
-            Opc == AMDGPU::V_SUB_F64_PSEUDO ? SISrcMods::NEG : SISrcMods::NONE;
-        Opc = Opc == AMDGPU::V_SUB_F64_PSEUDO ? AMDGPU::V_ADD_F64_e64 : Opc;
+            Opc == AMDGPU::V_SUB_F32_e32 ? SISrcMods::NEG : SISrcMods::NONE;
+        Opc = Opc == AMDGPU::V_SUB_F32_e32 ? AMDGPU::V_ADD_F64_e64 : Opc;
         auto DstVregInst = BuildMI(*ComputeLoop, I, DL, TII->get(Opc), DstVreg)
                                .addImm(Modifier) // src0 modifiers
                                .addReg(LaneValue->getOperand(0).getReg())
@@ -6148,7 +6148,7 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_SUB_F32_e64);
   case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64:
-    return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_SUB_F64_PSEUDO);
+    return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_SUB_F32_e32);
   case AMDGPU::WAVE_REDUCE_AND_PSEUDO_B32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_AND_B32);
   case AMDGPU::WAVE_REDUCE_AND_PSEUDO_B64:

>From a55154612f7a47acbcbb186c401ad247ced50bd7 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Mon, 19 Jan 2026 10:26:56 +0530
Subject: [PATCH 05/10] Use enum values for source modifiers

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index ec6d29739b8ec..df0e9dba730b4 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5791,10 +5791,10 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
                                     DstVreg)
                                 .addImm(srcMod) // src0 modifier
                                 .addReg(SrcReg)
-                                .addImm(0) // src1 modifier
+                                .addImm(SISrcMods::NONE) // src1 modifier
                                 .addReg(ActiveLanesVreg)
-                                .addImm(0)  // clamp
-                                .addImm(0); // output-mod
+                                .addImm(SISrcMods::NONE)  // clamp
+                                .addImm(SISrcMods::NONE); // output-mod
         if (is32BitOpc) {
           BuildMI(BB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
               .addReg(DstVreg);

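Since this patch only swaps raw immediates for enum names, it may help to spell out the VOP3 operand order those addImm calls are filling in. A hedged sketch in the same MIR-building style (the helper and register names are placeholders, not from the patch); note that clamp and omod are plain bits rather than source-modifier operands, so the substitution is value-preserving only because, if I read SIDefines.h right, SISrcMods::NONE is 0:

  // Hypothetical helper showing the VOP3 operand layout being populated:
  // src0_modifiers, src0, src1_modifiers, src1, clamp, omod.
  static void emitNegatedMulF32(MachineBasicBlock &BB, MachineInstr &MI,
                                const DebugLoc &DL, const SIInstrInfo *TII,
                                Register Dst, Register Src0, Register Src1) {
    BuildMI(BB, MI, DL, TII->get(AMDGPU::V_MUL_F32_e64), Dst)
        .addImm(SISrcMods::NEG)  // src0_modifiers: negate src0
        .addReg(Src0)
        .addImm(SISrcMods::NONE) // src1_modifiers
        .addReg(Src1)
        .addImm(0)               // clamp
        .addImm(0);              // omod
  }
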
>From 94faae9ba84fba0af63211195a379955d1c2ad3e Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Mon, 19 Jan 2026 15:19:47 +0530
Subject: [PATCH 06/10] Use `WAVE_REDUCE_FSUB_PSEUDO_F64` in switch statements

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 30 +++++++++++++----------
 1 file changed, 17 insertions(+), 13 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index df0e9dba730b4..2d3d14cd833e5 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5527,7 +5527,7 @@ static uint64_t getIdentityValueFor64BitWaveReduction(unsigned Opc) {
     return std::numeric_limits<uint64_t>::max();
   case AMDGPU::V_ADD_F64_e64:
     return 0x8000000000000000; // -0.0
-  case AMDGPU::V_SUB_F32_e32:
+  case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64:
     return 0x0; // +0.0
   default:
     llvm_unreachable(
@@ -5550,7 +5550,8 @@ static bool isFloatingPointWaveReduceOperation(unsigned Opc) {
          Opc == AMDGPU::V_ADD_F32_e64 || Opc == AMDGPU::V_SUB_F32_e64 ||
          Opc == AMDGPU::V_MIN_F64_e64 || Opc == AMDGPU::V_MAX_F64_e64 ||
          Opc == AMDGPU::V_MIN_NUM_F64_e64 || Opc == AMDGPU::V_MAX_NUM_F64_e64
-         Opc == AMDGPU::V_ADD_F64_e64 || Opc == AMDGPU::V_SUB_F32_e32;
+         Opc == AMDGPU::V_ADD_F64_e64 ||
+         Opc == AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64;
 }
 
 static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
@@ -5606,7 +5607,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
     case AMDGPU::S_SUB_I32:
     case AMDGPU::S_SUB_U64_PSEUDO:
     case AMDGPU::V_SUB_F32_e64:
-    case AMDGPU::V_SUB_F32_e32: {
+    case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64: {
       const TargetRegisterClass *WaveMaskRegClass = TRI->getWaveMaskRegClass();
       const TargetRegisterClass *DstRegClass = MRI.getRegClass(DstReg);
       Register ExecMask = MRI.createVirtualRegister(WaveMaskRegClass);
@@ -5764,7 +5765,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
       case AMDGPU::V_ADD_F32_e64:
       case AMDGPU::V_ADD_F64_e64:
       case AMDGPU::V_SUB_F32_e64:
-      case AMDGPU::V_SUB_F32_e32: {
+      case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64: {
         bool is32BitOpc = is32bitWaveReduceOperation(Opc);
         const TargetRegisterClass *VregRC =
             is32BitOpc ? &AMDGPU::VGPR_32RegClass : TRI->getVGPR64Class();
@@ -5780,10 +5781,10 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
             .addImm(0); // output-modifier
 
         // Take negation of input for SUB reduction
-        unsigned srcMod =
-            Opc == AMDGPU::V_SUB_F32_e64 || Opc == AMDGPU::V_SUB_F32_e32
-                ? SISrcMods::NEG
-                : SISrcMods::NONE;
+        unsigned srcMod = (Opc == AMDGPU::V_SUB_F32_e64 ||
+                           Opc == AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64)
+                              ? SISrcMods::NEG
+                              : SISrcMods::NONE;
         unsigned MulOpc = is32BitOpc    ? AMDGPU::V_MUL_F32_e64
                           : IsGFX12Plus ? AMDGPU::V_MUL_F64_e64_gfx12
                                         : AMDGPU::V_MUL_F64_e64;
@@ -6007,7 +6008,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
       case AMDGPU::V_MAX_F64_e64:
       case AMDGPU::V_MAX_NUM_F64_e64:
       case AMDGPU::V_ADD_F64_e64:
-      case AMDGPU::V_SUB_F32_e32: {
+      case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64: {
         int SrcIdx =
             AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src);
         const TargetRegisterClass *VregRC =
@@ -6023,9 +6024,11 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
         BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_MOV_B64_PSEUDO),
                 AccumulatorVReg)
             .addReg(Accumulator->getOperand(0).getReg());
-        unsigned Modifier =
-            Opc == AMDGPU::V_SUB_F32_e32 ? SISrcMods::NEG : SISrcMods::NONE;
-        Opc = Opc == AMDGPU::V_SUB_F32_e32 ? AMDGPU::V_ADD_F64_e64 : Opc;
+        unsigned Modifier = Opc == AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64
+                                ? SISrcMods::NEG
+                                : SISrcMods::NONE;
+        Opc = Opc == AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64 ? AMDGPU::V_ADD_F64_e64
+                                                         : Opc;
         auto DstVregInst = BuildMI(*ComputeLoop, I, DL, TII->get(Opc), DstVreg)
                                .addImm(Modifier) // src0 modifiers
                                .addReg(LaneValue->getOperand(0).getReg())
@@ -6148,7 +6151,8 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_SUB_F32_e64);
   case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64:
-    return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_SUB_F32_e32);
+    return lowerWaveReduce(MI, *BB, *getSubtarget(),
+                           AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64);
   case AMDGPU::WAVE_REDUCE_AND_PSEUDO_B32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_AND_B32);
   case AMDGPU::WAVE_REDUCE_AND_PSEUDO_B64:

>From 9c496e0a3261f03ae625a37e230a3662c2399bc5 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Wed, 21 Jan 2026 15:23:57 +0530
Subject: [PATCH 07/10] Refactor code and add some comments

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 2d3d14cd833e5..86d4d5ad58246 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6024,11 +6024,11 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
         BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_MOV_B64_PSEUDO),
                 AccumulatorVReg)
             .addReg(Accumulator->getOperand(0).getReg());
-        unsigned Modifier = Opc == AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64
-                                ? SISrcMods::NEG
-                                : SISrcMods::NONE;
-        Opc = Opc == AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64 ? AMDGPU::V_ADD_F64_e64
-                                                         : Opc;
+        unsigned Modifier = SISrcMods::NONE;
+        if (Opc == AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64) {
+          Opc = AMDGPU::V_ADD_F64_e64;
+          Modifier = SISrcMods::NEG;
+        }
         auto DstVregInst = BuildMI(*ComputeLoop, I, DL, TII->get(Opc), DstVreg)
                                .addImm(Modifier) // src0 modifiers
                                .addReg(LaneValue->getOperand(0).getReg())
@@ -6151,6 +6151,9 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_SUB_F32_e64);
   case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64:
+    // There is no S/V_SUB_F64 opcode, so use the Pseudo instruction as a filler
+    // value for the function argument. Double type subtraction is expanded by
+    // using the V_ADD_F64 opcode and setting the NEG bit.
     return lowerWaveReduce(MI, *BB, *getSubtarget(),
                            AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64);
   case AMDGPU::WAVE_REDUCE_AND_PSEUDO_B32:

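The comment added here is also why the identity values differ between the two reductions: a plain fadd reduction seeds the accumulator with -0.0 (bit pattern 0x8000000000000000), since -0.0 + x == x for every x including +0.0, while the fsub-as-negated-add expansion is seeded with +0.0. A small self-contained check of that signed-zero behavior, illustrative only and not taken from the LLVM tree:

  #include <bit>
  #include <cassert>
  #include <cstdint>

  int main() {
    const double negZero = std::bit_cast<double>(UINT64_C(0x8000000000000000));
    const double posZero = std::bit_cast<double>(UINT64_C(0));
    // -0.0 is the fadd identity: -0.0 + x == x for all x.
    assert(negZero + 0.25 == 0.25);
    assert(std::bit_cast<uint64_t>(negZero + posZero) == 0);
    assert(std::bit_cast<uint64_t>(negZero + negZero) ==
           UINT64_C(0x8000000000000000));
    // +0.0 is not: +0.0 + (-0.0) rounds to +0.0 and loses the zero's sign.
    assert(std::bit_cast<uint64_t>(posZero + negZero) == 0);
  }
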
>From 6b787bfcdc257ce4b10f0dfacc0b19256364143a Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Wed, 28 Jan 2026 09:42:16 +0530
Subject: [PATCH 08/10] Avoid generation check in callee function

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 24 ++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 86d4d5ad58246..a5272e94d7b9e 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5526,6 +5526,7 @@ static uint64_t getIdentityValueFor64BitWaveReduction(unsigned Opc) {
   case AMDGPU::S_AND_B64:
     return std::numeric_limits<uint64_t>::max();
   case AMDGPU::V_ADD_F64_e64:
+  case AMDGPU::V_ADD_F64_e64_gfx12:
     return 0x8000000000000000; // -0.0
   case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64:
     return 0x0; // +0.0
@@ -5549,8 +5550,8 @@ static bool isFloatingPointWaveReduceOperation(unsigned Opc) {
   return Opc == AMDGPU::V_MIN_F32_e64 || Opc == AMDGPU::V_MAX_F32_e64 ||
          Opc == AMDGPU::V_ADD_F32_e64 || Opc == AMDGPU::V_SUB_F32_e64 ||
          Opc == AMDGPU::V_MIN_F64_e64 || Opc == AMDGPU::V_MAX_F64_e64 ||
-         Opc == AMDGPU::V_MIN_NUM_F64_e64 || Opc == AMDGPU::V_MAX_NUM_F64_e64
-         Opc == AMDGPU::V_ADD_F64_e64 ||
+         Opc == AMDGPU::V_MIN_NUM_F64_e64 || Opc == AMDGPU::V_MAX_NUM_F64_e64 ||
+         Opc == AMDGPU::V_ADD_F64_e64 || Opc == AMDGPU::V_ADD_F64_e64_gfx12 ||
          Opc == AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64;
 }
 
@@ -5604,6 +5605,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
     case AMDGPU::S_ADD_U64_PSEUDO:
     case AMDGPU::V_ADD_F32_e64:
     case AMDGPU::V_ADD_F64_e64:
+    case AMDGPU::V_ADD_F64_e64_gfx12:
     case AMDGPU::S_SUB_I32:
     case AMDGPU::S_SUB_U64_PSEUDO:
     case AMDGPU::V_SUB_F32_e64:
@@ -5764,6 +5766,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
       }
       case AMDGPU::V_ADD_F32_e64:
       case AMDGPU::V_ADD_F64_e64:
+      case AMDGPU::V_ADD_F64_e64_gfx12:
       case AMDGPU::V_SUB_F32_e64:
       case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64: {
         bool is32BitOpc = is32bitWaveReduceOperation(Opc);
@@ -5785,9 +5788,10 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
                            Opc == AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64)
                               ? SISrcMods::NEG
                               : SISrcMods::NONE;
-        unsigned MulOpc = is32BitOpc    ? AMDGPU::V_MUL_F32_e64
-                          : IsGFX12Plus ? AMDGPU::V_MUL_F64_e64_gfx12
-                                        : AMDGPU::V_MUL_F64_e64;
+        unsigned MulOpc = is32BitOpc ? AMDGPU::V_MUL_F32_e64
+                          : ST.getGeneration() >= AMDGPUSubtarget::GFX12
+                              ? AMDGPU::V_MUL_F64_e64_gfx12
+                              : AMDGPU::V_MUL_F64_e64;
         auto DestVregInst = BuildMI(BB, MI, DL, TII->get(MulOpc),
                                     DstVreg)
                                 .addImm(srcMod) // src0 modifier
@@ -6008,6 +6012,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
       case AMDGPU::V_MAX_F64_e64:
       case AMDGPU::V_MAX_NUM_F64_e64:
       case AMDGPU::V_ADD_F64_e64:
+      case AMDGPU::V_ADD_F64_e64_gfx12:
       case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64: {
         int SrcIdx =
             AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src);
@@ -6026,7 +6031,9 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
             .addReg(Accumulator->getOperand(0).getReg());
         unsigned Modifier = SISrcMods::NONE;
         if (Opc == AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64) {
-          Opc = AMDGPU::V_ADD_F64_e64;
+          Opc = ST.getGeneration() >= AMDGPUSubtarget::GFX12
+                    ? AMDGPU::V_ADD_F64_e64_gfx12
+                    : AMDGPU::V_ADD_F64_e64;
           Modifier = SISrcMods::NEG;
         }
         auto DstVregInst = BuildMI(*ComputeLoop, I, DL, TII->get(Opc), DstVreg)
@@ -6143,7 +6150,10 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   case AMDGPU::WAVE_REDUCE_FADD_PSEUDO_F32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_ADD_F32_e64);
   case AMDGPU::WAVE_REDUCE_FADD_PSEUDO_F64:
-    return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_ADD_F64_e64);
+    return lowerWaveReduce(MI, *BB, *getSubtarget(),
+                           ST.getGeneration() >= AMDGPUSubtarget::GFX12
+                               ? AMDGPU::V_ADD_F64_e64_gfx12
+                               : AMDGPU::V_ADD_F64_e64);
   case AMDGPU::WAVE_REDUCE_SUB_PSEUDO_I32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_SUB_I32);
   case AMDGPU::WAVE_REDUCE_SUB_PSEUDO_U64:

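With this patch the generation check moves to the call sites, so lowerWaveReduce receives a concrete opcode and never re-queries the subtarget. The selection it hoists amounts to something like the following (helper name hypothetical):

  // Pick the f64 add opcode once, at the caller, based on the subtarget.
  static unsigned pickAddF64Opcode(const GCNSubtarget &ST) {
    return ST.getGeneration() >= AMDGPUSubtarget::GFX12
               ? AMDGPU::V_ADD_F64_e64_gfx12
               : AMDGPU::V_ADD_F64_e64;
  }
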
>From c8ab6b6ca382dd3aa6ba6440b61c375d75f33215 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Wed, 28 Jan 2026 10:02:15 +0530
Subject: [PATCH 09/10] Use `v_mul_f64_pseudo_e64`

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp           | 2 +-
 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fadd.ll | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index a5272e94d7b9e..f6c454c57a923 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5790,7 +5790,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
                               : SISrcMods::NONE;
         unsigned MulOpc = is32BitOpc ? AMDGPU::V_MUL_F32_e64
                           : ST.getGeneration() >= AMDGPUSubtarget::GFX12
-                              ? AMDGPU::V_MUL_F64_e64_gfx12
+                              ? AMDGPU::V_MUL_F64_pseudo_e64
                               : AMDGPU::V_MUL_F64_e64;
         auto DestVregInst = BuildMI(BB, MI, DL, TII->get(MulOpc),
                                     DstVreg)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fadd.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fadd.ll
index 50b97f099c9ca..ec9fbe4ebaef8 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fadd.ll
@@ -1229,7 +1229,7 @@ define amdgpu_kernel void @uniform_value_double(ptr addrspace(1) %out, double %i
 ; GFX12DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
 ; GFX12DAGISEL-NEXT:    s_wait_kmcnt 0x0
 ; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12DAGISEL-NEXT:    v_mul_f64_e64 v[0:1], s[2:3], v[0:1]
+; GFX12DAGISEL-NEXT:    v_mul_f64_e32 v[0:1], s[2:3], v[0:1]
 ; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s2, v0
 ; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
 ; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s3, v1
@@ -2080,7 +2080,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
 ; GFX12DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s6
 ; GFX12DAGISEL-NEXT:    s_wait_kmcnt 0x0
 ; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12DAGISEL-NEXT:    v_mul_f64_e64 v[0:1], s[2:3], v[0:1]
+; GFX12DAGISEL-NEXT:    v_mul_f64_e32 v[0:1], s[2:3], v[0:1]
 ; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s6, v0
 ; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s7, v1
@@ -2100,7 +2100,7 @@ define amdgpu_kernel void @divergent_cfg_double(ptr addrspace(1) %out, double %i
 ; GFX12DAGISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
 ; GFX12DAGISEL-NEXT:    v_cvt_f64_i32_e32 v[0:1], s3
 ; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12DAGISEL-NEXT:    v_mul_f64_e64 v[0:1], s[4:5], v[0:1]
+; GFX12DAGISEL-NEXT:    v_mul_f64_e32 v[0:1], s[4:5], v[0:1]
 ; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s4, v0
 ; GFX12DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX12DAGISEL-NEXT:    v_readfirstlane_b32 s5, v1

>From f74346f5ba6594038cd30add6e2d76065bf184bf Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Wed, 28 Jan 2026 12:25:05 +0530
Subject: [PATCH 10/10] Don't use the pseudo as a case label

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 40 ++++++++++-------------
 1 file changed, 17 insertions(+), 23 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index f6c454c57a923..7830d3c923d5d 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5528,8 +5528,6 @@ static uint64_t getIdentityValueFor64BitWaveReduction(unsigned Opc) {
   case AMDGPU::V_ADD_F64_e64:
   case AMDGPU::V_ADD_F64_e64_gfx12:
     return 0x8000000000000000; // -0.0
-  case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64:
-    return 0x0; // +0.0
   default:
     llvm_unreachable(
         "Unexpected opcode in getIdentityValueFor64BitWaveReduction");
@@ -5551,8 +5549,7 @@ static bool isFloatingPointWaveReduceOperation(unsigned Opc) {
          Opc == AMDGPU::V_ADD_F32_e64 || Opc == AMDGPU::V_SUB_F32_e64 ||
          Opc == AMDGPU::V_MIN_F64_e64 || Opc == AMDGPU::V_MAX_F64_e64 ||
          Opc == AMDGPU::V_MIN_NUM_F64_e64 || Opc == AMDGPU::V_MAX_NUM_F64_e64 ||
-         Opc == AMDGPU::V_ADD_F64_e64 || Opc == AMDGPU::V_ADD_F64_e64_gfx12 ||
-         Opc == AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64;
+         Opc == AMDGPU::V_ADD_F64_e64 || Opc == AMDGPU::V_ADD_F64_e64_gfx12;
 }
 
 static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
@@ -5608,8 +5605,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
     case AMDGPU::V_ADD_F64_e64_gfx12:
     case AMDGPU::S_SUB_I32:
     case AMDGPU::S_SUB_U64_PSEUDO:
-    case AMDGPU::V_SUB_F32_e64:
-    case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64: {
+    case AMDGPU::V_SUB_F32_e64: {
       const TargetRegisterClass *WaveMaskRegClass = TRI->getWaveMaskRegClass();
       const TargetRegisterClass *DstRegClass = MRI.getRegClass(DstReg);
       Register ExecMask = MRI.createVirtualRegister(WaveMaskRegClass);
@@ -5767,8 +5763,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
       case AMDGPU::V_ADD_F32_e64:
       case AMDGPU::V_ADD_F64_e64:
       case AMDGPU::V_ADD_F64_e64_gfx12:
-      case AMDGPU::V_SUB_F32_e64:
-      case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64: {
+      case AMDGPU::V_SUB_F32_e64: {
         bool is32BitOpc = is32bitWaveReduceOperation(Opc);
         const TargetRegisterClass *VregRC =
             is32BitOpc ? &AMDGPU::VGPR_32RegClass : TRI->getVGPR64Class();
@@ -5784,10 +5779,11 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
             .addImm(0); // output-modifier
 
         // Take negation of input for SUB reduction
-        unsigned srcMod = (Opc == AMDGPU::V_SUB_F32_e64 ||
-                           Opc == AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64)
-                              ? SISrcMods::NEG
-                              : SISrcMods::NONE;
+        unsigned srcMod =
+            (Opc == AMDGPU::V_SUB_F32_e64 ||
+             MI.getOpcode() == AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64)
+                ? SISrcMods::NEG
+                : SISrcMods::NONE;
         unsigned MulOpc = is32BitOpc ? AMDGPU::V_MUL_F32_e64
                           : ST.getGeneration() >= AMDGPUSubtarget::GFX12
                               ? AMDGPU::V_MUL_F64_pseudo_e64
@@ -5878,6 +5874,8 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
           .addImm(IdentityValue);
     } else {
       uint64_t IdentityValue = getIdentityValueFor64BitWaveReduction(Opc);
+      if (MI.getOpcode() == AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64)
+        IdentityValue = 0x0; // +0.0 for double sub reduction
       BuildMI(BB, I, DL, TII->get(AMDGPU::S_MOV_B64_IMM_PSEUDO), IdentityValReg)
           .addImm(IdentityValue);
     }
@@ -6012,8 +6010,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
       case AMDGPU::V_MAX_F64_e64:
       case AMDGPU::V_MAX_NUM_F64_e64:
       case AMDGPU::V_ADD_F64_e64:
-      case AMDGPU::V_ADD_F64_e64_gfx12:
-      case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64: {
+      case AMDGPU::V_ADD_F64_e64_gfx12: {
         int SrcIdx =
             AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src);
         const TargetRegisterClass *VregRC =
@@ -6030,12 +6027,8 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
                 AccumulatorVReg)
             .addReg(Accumulator->getOperand(0).getReg());
         unsigned Modifier = SISrcMods::NONE;
-        if (Opc == AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64) {
-          Opc = ST.getGeneration() >= AMDGPUSubtarget::GFX12
-                    ? AMDGPU::V_ADD_F64_e64_gfx12
-                    : AMDGPU::V_ADD_F64_e64;
+        if (MI.getOpcode() == AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64)
           Modifier = SISrcMods::NEG;
-        }
         auto DstVregInst = BuildMI(*ComputeLoop, I, DL, TII->get(Opc), DstVreg)
                                .addImm(Modifier) // src0 modifiers
                                .addReg(LaneValue->getOperand(0).getReg())
@@ -6161,11 +6154,12 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_SUB_F32_e64);
   case AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64:
-    // There is no S/V_SUB_F64 opcode, so use the Pseudo instruction as a filler
-    // value for the function argument. Double type subtraction is expanded by
-    // using the V_ADD_F64 opcode and setting the NEG bit.
+    // There is no S/V_SUB_F64 opcode. Double-precision subtraction is
+    // expanded as an fadd with the NEG source modifier set on the input.
     return lowerWaveReduce(MI, *BB, *getSubtarget(),
-                           AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64);
+                           ST.getGeneration() >= AMDGPUSubtarget::GFX12
+                               ? AMDGPU::V_ADD_F64_e64_gfx12
+                               : AMDGPU::V_ADD_F64_e64);
   case AMDGPU::WAVE_REDUCE_AND_PSEUDO_B32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_AND_B32);
   case AMDGPU::WAVE_REDUCE_AND_PSEUDO_B64:

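After this final patch, the switch statements see only real opcodes; the fsub pseudo is recovered from the original instruction at the one point where the two reductions actually diverge. The resulting pattern, sketched with placeholder surroundings:

  // Opc is already the lowered V_ADD_F64 flavor for both fadd and fsub;
  // only the original pseudo opcode distinguishes the two reductions.
  unsigned Modifier = SISrcMods::NONE;
  if (MI.getOpcode() == AMDGPU::WAVE_REDUCE_FSUB_PSEUDO_F64)
    Modifier = SISrcMods::NEG; // negate each lane value before accumulating
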


More information about the llvm-branch-commits mailing list