[llvm] [AMDGPU] Extending wave reduction intrinsics for `i64` types - 3 (PR #151310)

via llvm-commits <llvm-commits at lists.llvm.org>
Wed Sep 10 06:02:13 PDT 2025


https://github.com/easyonaadit updated https://github.com/llvm/llvm-project/pull/151310

From 5fa53924fcf5ad6083185d332349354a6cc074eb Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Sat, 19 Jul 2025 12:48:18 +0530
Subject: [PATCH 01/12] [AMDGPU] Extending wave reduction intrinsics for `i64`
 types - 1

Supporting the min/max operations: `min`, `max`, `umin`, and `umax`.
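
As a rough reference, these are the identity elements the i64 reductions start
from; a minimal standalone C++ sketch (the helper name and shape are
illustrative, not this patch's exact code):

#include <cassert>
#include <cstdint>
#include <limits>

// Identity element a 64-bit min/max wave reduction starts from, mirroring the
// 32-bit helper touched in SIISelLowering.cpp below:
//   umin -> all ones, min -> INT64_MAX, umax -> 0, max -> INT64_MIN.
uint64_t identityFor64BitMinMax(bool IsSigned, bool IsMax) {
  if (IsSigned)
    return IsMax ? static_cast<uint64_t>(std::numeric_limits<int64_t>::min())
                 : static_cast<uint64_t>(std::numeric_limits<int64_t>::max());
  return IsMax ? std::numeric_limits<uint64_t>::min()   // 0
               : std::numeric_limits<uint64_t>::max();  // all ones
}

int main() {
  // Identity for signed max, matching the "s_mov_b32 s4, 0" / "s_brev_b32 s5, 1"
  // pair in the updated llvm.amdgcn.reduce.max.ll checks.
  assert(identityFor64BitMinMax(/*IsSigned=*/true, /*IsMax=*/true) ==
         0x8000000000000000ull);
  return 0;
}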
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  12 +-
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll  |  92 ++++++++++++++++
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll  |  92 ++++++++++++++++
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll | 103 ++++++++++++++++++
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll |  92 ++++++++++++++++
 5 files changed, 390 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 4bb189e1cf09b..72a5510de1a49 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5324,12 +5324,16 @@ static MachineBasicBlock *Expand64BitScalarArithmetic(MachineInstr &MI,
 static uint32_t getIdentityValueFor32BitWaveReduction(unsigned Opc) {
   switch (Opc) {
   case AMDGPU::S_MIN_U32:
+  case AMDGPU::V_CMP_LT_U64_e64: // umin.u64
     return std::numeric_limits<uint32_t>::max();
   case AMDGPU::S_MIN_I32:
+  case AMDGPU::V_CMP_LT_I64_e64: // min.i64
     return std::numeric_limits<int32_t>::max();
   case AMDGPU::S_MAX_U32:
+  case AMDGPU::V_CMP_GT_U64_e64: // umax.u64
     return std::numeric_limits<uint32_t>::min();
   case AMDGPU::S_MAX_I32:
+  case AMDGPU::V_CMP_GT_I64_e64: // max.i64
     return std::numeric_limits<int32_t>::min();
   case AMDGPU::S_ADD_I32:
   case AMDGPU::S_SUB_I32:
@@ -5385,16 +5389,22 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
   bool isSGPR = TRI->isSGPRClass(MRI.getRegClass(SrcReg));
   Register DstReg = MI.getOperand(0).getReg();
   MachineBasicBlock *RetBB = nullptr;
+  bool is32BitOpc = TRI->getRegSizeInBits(*MRI.getRegClass(DstReg)) == 32;
   if (isSGPR) {
     switch (Opc) {
     case AMDGPU::S_MIN_U32:
+    case AMDGPU::V_CMP_LT_U64_e64: /*umin*/
     case AMDGPU::S_MIN_I32:
+    case AMDGPU::V_CMP_LT_I64_e64: /*min*/
     case AMDGPU::S_MAX_U32:
+    case AMDGPU::V_CMP_GT_U64_e64: /*umax*/
     case AMDGPU::S_MAX_I32:
+    case AMDGPU::V_CMP_GT_I64_e64: /*max*/
     case AMDGPU::S_AND_B32:
     case AMDGPU::S_OR_B32: {
       // Idempotent operations.
-      BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B32), DstReg).addReg(SrcReg);
+      unsigned movOpc = is32BitOpc ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
+      BuildMI(BB, MI, DL, TII->get(movOpc), DstReg).addReg(SrcReg);
       RetBB = &BB;
       break;
     }
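
The "Idempotent operations" comment above is the key observation for the
uniform (SGPR) case: reducing a wave-uniform value with min/max/and/or yields
that same value, so a single 32- or 64-bit scalar move is all the lowering
needs. A small standalone illustration of that property (plain C++, not
compiler code):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  const uint64_t Uniform = 0x123456789abcdef0ULL;
  std::vector<uint64_t> Lanes(64, Uniform); // every active lane holds the same value
  uint64_t Acc = Lanes.front();
  for (uint64_t V : Lanes)
    Acc = std::max(Acc, V); // the same holds for min, and, or
  assert(Acc == Uniform);   // the reduction is just the input value
  return 0;
}
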
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
index ace65a03a5abb..ae727d4f3f713 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
@@ -1269,9 +1269,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-LABEL: divergent_value_i64:
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
 ; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX8DAGISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX8DAGISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT:    s_brev_b32 s5, 1
+; GFX8DAGISEL-NEXT:    s_mov_b32 s4, 0
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1294,9 +1300,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL-LABEL: divergent_value_i64:
 ; GFX8GISEL:       ; %bb.0: ; %entry
 ; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX8GISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX8GISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT:    s_brev_b32 s5, 1
+; GFX8GISEL-NEXT:    s_mov_b32 s4, 0
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX8GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1319,9 +1331,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL-LABEL: divergent_value_i64:
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
 ; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX9DAGISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX9DAGISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT:    s_brev_b32 s5, 1
+; GFX9DAGISEL-NEXT:    s_mov_b32 s4, 0
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1344,9 +1362,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL-LABEL: divergent_value_i64:
 ; GFX9GISEL:       ; %bb.0: ; %entry
 ; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX9GISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX9GISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT:    s_brev_b32 s5, 1
+; GFX9GISEL-NEXT:    s_mov_b32 s4, 0
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX9GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1369,9 +1393,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL-LABEL: divergent_value_i64:
 ; GFX1064DAGISEL:       ; %bb.0: ; %entry
 ; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX1064DAGISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT:    s_brev_b32 s5, 1
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, 0
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1393,9 +1423,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL-LABEL: divergent_value_i64:
 ; GFX1064GISEL:       ; %bb.0: ; %entry
 ; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1064GISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX1064GISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT:    s_brev_b32 s5, 1
+; GFX1064GISEL-NEXT:    s_mov_b32 s4, 0
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX1064GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1417,9 +1453,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL-LABEL: divergent_value_i64:
 ; GFX1032DAGISEL:       ; %bb.0: ; %entry
 ; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX1032DAGISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+=======
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT:    s_brev_b32 s5, 1
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, 0
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1441,9 +1483,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL-LABEL: divergent_value_i64:
 ; GFX1032GISEL:       ; %bb.0: ; %entry
 ; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1032GISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX1032GISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+=======
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT:    s_brev_b32 s5, 1
+; GFX1032GISEL-NEXT:    s_mov_b32 s4, 0
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1465,16 +1513,27 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL-LABEL: divergent_value_i64:
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
 ; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, 0
 ; GFX1164DAGISEL-NEXT:    s_brev_b32 s1, 1
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+=======
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT:    s_brev_b32 s1, 1
+; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s8
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s8
+<<<<<<< HEAD
+=======
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1164DAGISEL-NEXT:    v_cmp_gt_i64_e32 vcc, s[4:5], v[4:5]
 ; GFX1164DAGISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
 ; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s8
@@ -1490,16 +1549,27 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL-LABEL: divergent_value_i64:
 ; GFX1164GISEL:       ; %bb.0: ; %entry
 ; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1164GISEL-NEXT:    s_mov_b32 s0, 0
 ; GFX1164GISEL-NEXT:    s_brev_b32 s1, 1
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+=======
+; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT:    s_brev_b32 s1, 1
+; GFX1164GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s8
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s8
+<<<<<<< HEAD
+=======
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1164GISEL-NEXT:    v_cmp_gt_i64_e32 vcc, s[4:5], v[4:5]
 ; GFX1164GISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
 ; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s8
@@ -1515,15 +1585,26 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL-LABEL: divergent_value_i64:
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
 ; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, 0
 ; GFX1132DAGISEL-NEXT:    s_brev_b32 s1, 1
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+=======
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT:    s_brev_b32 s1, 1
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
 ; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
+<<<<<<< HEAD
+=======
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1132DAGISEL-NEXT:    v_cmp_gt_i64_e32 vcc_lo, s[4:5], v[4:5]
 ; GFX1132DAGISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
 ; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -1538,15 +1619,26 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-LABEL: divergent_value_i64:
 ; GFX1132GISEL:       ; %bb.0: ; %entry
 ; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1132GISEL-NEXT:    s_mov_b32 s0, 0
 ; GFX1132GISEL-NEXT:    s_brev_b32 s1, 1
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+=======
+; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT:    s_brev_b32 s1, 1
+; GFX1132GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
+<<<<<<< HEAD
+=======
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1132GISEL-NEXT:    v_cmp_gt_i64_e32 vcc_lo, s[4:5], v[4:5]
 ; GFX1132GISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
 ; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
index b12537eb0cebe..ebfc9e433a3d8 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
@@ -1269,9 +1269,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-LABEL: divergent_value_i64:
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
 ; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX8DAGISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX8DAGISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT:    s_brev_b32 s5, -2
+; GFX8DAGISEL-NEXT:    s_mov_b32 s4, -1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1294,9 +1300,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL-LABEL: divergent_value_i64:
 ; GFX8GISEL:       ; %bb.0: ; %entry
 ; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX8GISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX8GISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT:    s_brev_b32 s5, -2
+; GFX8GISEL-NEXT:    s_mov_b32 s4, -1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX8GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1319,9 +1331,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL-LABEL: divergent_value_i64:
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
 ; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX9DAGISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX9DAGISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT:    s_brev_b32 s5, -2
+; GFX9DAGISEL-NEXT:    s_mov_b32 s4, -1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1344,9 +1362,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL-LABEL: divergent_value_i64:
 ; GFX9GISEL:       ; %bb.0: ; %entry
 ; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX9GISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX9GISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT:    s_brev_b32 s5, -2
+; GFX9GISEL-NEXT:    s_mov_b32 s4, -1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX9GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1369,9 +1393,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL-LABEL: divergent_value_i64:
 ; GFX1064DAGISEL:       ; %bb.0: ; %entry
 ; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX1064DAGISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT:    s_brev_b32 s5, -2
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, -1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1393,9 +1423,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL-LABEL: divergent_value_i64:
 ; GFX1064GISEL:       ; %bb.0: ; %entry
 ; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1064GISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX1064GISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT:    s_brev_b32 s5, -2
+; GFX1064GISEL-NEXT:    s_mov_b32 s4, -1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX1064GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1417,9 +1453,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL-LABEL: divergent_value_i64:
 ; GFX1032DAGISEL:       ; %bb.0: ; %entry
 ; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX1032DAGISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+=======
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT:    s_brev_b32 s5, -2
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, -1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1441,9 +1483,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL-LABEL: divergent_value_i64:
 ; GFX1032GISEL:       ; %bb.0: ; %entry
 ; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1032GISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX1032GISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+=======
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT:    s_brev_b32 s5, -2
+; GFX1032GISEL-NEXT:    s_mov_b32 s4, -1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1465,16 +1513,27 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL-LABEL: divergent_value_i64:
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
 ; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, -1
 ; GFX1164DAGISEL-NEXT:    s_brev_b32 s1, -2
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+=======
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT:    s_brev_b32 s1, -2
+; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s8
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s8
+<<<<<<< HEAD
+=======
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1164DAGISEL-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[4:5]
 ; GFX1164DAGISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
 ; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s8
@@ -1490,16 +1549,27 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL-LABEL: divergent_value_i64:
 ; GFX1164GISEL:       ; %bb.0: ; %entry
 ; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1164GISEL-NEXT:    s_mov_b32 s0, -1
 ; GFX1164GISEL-NEXT:    s_brev_b32 s1, -2
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+=======
+; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT:    s_brev_b32 s1, -2
+; GFX1164GISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s8
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s8
+<<<<<<< HEAD
+=======
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1164GISEL-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[4:5]
 ; GFX1164GISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
 ; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s8
@@ -1515,15 +1585,26 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL-LABEL: divergent_value_i64:
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
 ; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, -1
 ; GFX1132DAGISEL-NEXT:    s_brev_b32 s1, -2
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+=======
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT:    s_brev_b32 s1, -2
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
 ; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
+<<<<<<< HEAD
+=======
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1132DAGISEL-NEXT:    v_cmp_lt_i64_e32 vcc_lo, s[4:5], v[4:5]
 ; GFX1132DAGISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
 ; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -1538,15 +1619,26 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-LABEL: divergent_value_i64:
 ; GFX1132GISEL:       ; %bb.0: ; %entry
 ; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1132GISEL-NEXT:    s_mov_b32 s0, -1
 ; GFX1132GISEL-NEXT:    s_brev_b32 s1, -2
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+=======
+; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT:    s_brev_b32 s1, -2
+; GFX1132GISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
+<<<<<<< HEAD
+=======
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1132GISEL-NEXT:    v_cmp_lt_i64_e32 vcc_lo, s[4:5], v[4:5]
 ; GFX1132GISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
 ; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
index 1f848d577d2a4..2bcd0c2232a1b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
@@ -1194,6 +1194,7 @@ entry:
   ret void
 }
 
+<<<<<<< HEAD
 define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-LABEL: divergent_value_i64:
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
@@ -1474,6 +1475,108 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
 ; GFX1132GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX1132GISEL-NEXT:    s_setpc_b64 s[30:31]
+=======
+define amdgpu_kernel void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
+; GFX8DAGISEL-LABEL: divergent_value_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: divergent_value_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: divergent_value_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: divergent_value_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX10DAGISEL-LABEL: divergent_value_i64:
+; GFX10DAGISEL:       ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX10DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX10DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT:    s_endpgm
+;
+; GFX10GISEL-LABEL: divergent_value_i64:
+; GFX10GISEL:       ; %bb.0: ; %entry
+; GFX10GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX10GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX10GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT:    s_endpgm
+;
+; GFX1164DAGISEL-LABEL: divergent_value_i64:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT:    s_endpgm
+;
+; GFX1164GISEL-LABEL: divergent_value_i64:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT:    s_endpgm
+;
+; GFX1132DAGISEL-LABEL: divergent_value_i64:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT:    s_endpgm
+;
+; GFX1132GISEL-LABEL: divergent_value_i64:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT:    s_endpgm
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.umax.i64(i64 %id.x, i32 1)
   store i64 %result, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
index c2cfb8828c30c..e641eeec4353c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
@@ -1198,8 +1198,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-LABEL: divergent_value_i64:
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
 ; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX8DAGISEL-NEXT:    s_mov_b32 s4, -1
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1222,8 +1228,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL-LABEL: divergent_value_i64:
 ; GFX8GISEL:       ; %bb.0: ; %entry
 ; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX8GISEL-NEXT:    s_mov_b32 s4, -1
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX8GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1246,8 +1258,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL-LABEL: divergent_value_i64:
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
 ; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX9DAGISEL-NEXT:    s_mov_b32 s4, -1
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1270,8 +1288,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL-LABEL: divergent_value_i64:
 ; GFX9GISEL:       ; %bb.0: ; %entry
 ; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX9GISEL-NEXT:    s_mov_b32 s4, -1
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX9GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1294,8 +1318,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL-LABEL: divergent_value_i64:
 ; GFX1064DAGISEL:       ; %bb.0: ; %entry
 ; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, -1
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1317,8 +1347,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL-LABEL: divergent_value_i64:
 ; GFX1064GISEL:       ; %bb.0: ; %entry
 ; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX1064GISEL-NEXT:    s_mov_b32 s4, -1
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX1064GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1340,8 +1376,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL-LABEL: divergent_value_i64:
 ; GFX1032DAGISEL:       ; %bb.0: ; %entry
 ; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+=======
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, -1
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1363,8 +1405,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL-LABEL: divergent_value_i64:
 ; GFX1032GISEL:       ; %bb.0: ; %entry
 ; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+=======
+; GFX1032GISEL-NEXT:    s_mov_b32 s4, -1
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1386,15 +1434,26 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL-LABEL: divergent_value_i64:
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
 ; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], -1
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+=======
+; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s8
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s8
+<<<<<<< HEAD
+=======
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1164DAGISEL-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[4:5]
 ; GFX1164DAGISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
 ; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s8
@@ -1410,15 +1469,26 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL-LABEL: divergent_value_i64:
 ; GFX1164GISEL:       ; %bb.0: ; %entry
 ; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], -1
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+=======
+; GFX1164GISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s8
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s8
+<<<<<<< HEAD
+=======
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1164GISEL-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[4:5]
 ; GFX1164GISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
 ; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s8
@@ -1434,14 +1504,25 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL-LABEL: divergent_value_i64:
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
 ; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], -1
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+=======
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
 ; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
+<<<<<<< HEAD
+=======
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1132DAGISEL-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[4:5], v[4:5]
 ; GFX1132DAGISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
 ; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -1456,14 +1537,25 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-LABEL: divergent_value_i64:
 ; GFX1132GISEL:       ; %bb.0: ; %entry
 ; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], -1
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+=======
+; GFX1132GISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
+<<<<<<< HEAD
+=======
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
 ; GFX1132GISEL-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[4:5], v[4:5]
 ; GFX1132GISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
 ; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
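
The divergent_value_i64 loops in the checks above all follow the same scheme:
copy the exec mask, find its lowest set bit, read that lane's 64-bit value
(two v_readlane_b32 for the halves), fold it into the running result with a
64-bit compare, clear the bit, and repeat until no active lanes remain. A
simplified standalone C++ model of that iteration (it glosses over wave size
and the conditional-select details):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>
#include <vector>

int main() {
  // One 64-bit value per lane; bit i of Exec marks lane i as active.
  std::vector<int64_t> LaneValues = {7, -3, 42, 5};
  uint64_t Exec = 0b1101; // lanes 0, 2 and 3 are active

  int64_t Acc = std::numeric_limits<int64_t>::min(); // identity for signed max
  while (Exec != 0) {
    unsigned Lane = __builtin_ctzll(Exec); // s_ff1/s_ctz: lowest active lane
    Acc = std::max(Acc, LaneValues[Lane]); // v_readlane + 64-bit compare/select
    Exec &= Exec - 1;                      // s_bitset0: retire this lane
  }
  std::cout << Acc << '\n'; // prints 42
  return 0;
}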

From cb7ca6ea8ace1168442bbea35ccdb15f33ae2bb3 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Wed, 13 Aug 2025 11:28:13 +0530
Subject: [PATCH 02/12] Addressing Review Comments

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 72a5510de1a49..bb3023114dfa3 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5393,18 +5393,22 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
   if (isSGPR) {
     switch (Opc) {
     case AMDGPU::S_MIN_U32:
-    case AMDGPU::V_CMP_LT_U64_e64: /*umin*/
     case AMDGPU::S_MIN_I32:
-    case AMDGPU::V_CMP_LT_I64_e64: /*min*/
     case AMDGPU::S_MAX_U32:
-    case AMDGPU::V_CMP_GT_U64_e64: /*umax*/
     case AMDGPU::S_MAX_I32:
-    case AMDGPU::V_CMP_GT_I64_e64: /*max*/
     case AMDGPU::S_AND_B32:
     case AMDGPU::S_OR_B32: {
       // Idempotent operations.
-      unsigned movOpc = is32BitOpc ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
-      BuildMI(BB, MI, DL, TII->get(movOpc), DstReg).addReg(SrcReg);
+      BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B32), DstReg).addReg(SrcReg);
+      RetBB = &BB;
+      break;
+    }
+    case AMDGPU::V_CMP_LT_U64_e64: // umin
+    case AMDGPU::V_CMP_LT_I64_e64: // min
+    case AMDGPU::V_CMP_GT_U64_e64: // umax
+    case AMDGPU::V_CMP_GT_I64_e64: { // max
+      // Idempotent operations.
+      BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B64), DstReg).addReg(SrcReg);
       RetBB = &BB;
       break;
     }
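
This revision keeps S_MOV_B32 for the existing 32-bit reductions and gives the
64-bit compare opcodes their own case group, so the width of the uniform-case
copy is implied by the reduction opcode itself rather than by a register-size
query on the destination. A toy standalone sketch of that dispatch (the
enumerators below are stand-ins for the real AMDGPU opcode enums, purely for
illustration):

#include <cassert>

enum Opcode { S_MIN_U32, S_MAX_I32, V_CMP_LT_U64_e64, V_CMP_GT_I64_e64 };
enum MovOpcode { S_MOV_B32, S_MOV_B64 };

MovOpcode getUniformCaseMov(Opcode Opc) {
  switch (Opc) {
  case V_CMP_LT_U64_e64: // umin.i64
  case V_CMP_GT_I64_e64: // max.i64
    return S_MOV_B64;    // 64-bit reductions copy the full 64-bit source
  default:
    return S_MOV_B32;    // 32-bit reductions keep the existing 32-bit copy
  }
}

int main() {
  assert(getUniformCaseMov(S_MIN_U32) == S_MOV_B32);
  assert(getUniformCaseMov(V_CMP_GT_I64_e64) == S_MOV_B64);
  return 0;
}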

From 11c9ebc8f57a1a63cb8f94d69d5805d48ce1945a Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Wed, 20 Aug 2025 11:56:22 +0530
Subject: [PATCH 03/12] Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with
 legality concerns.

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  11 +-
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll  |  96 +++++
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll  |  96 +++++
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll | 327 ++++++++++++++----
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll |  88 ++++-
 5 files changed, 539 insertions(+), 79 deletions(-)
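
As I understand the subject line, the point of S_MOV_B64_IMM_PSEUDO is to let
the lowering emit a 64-bit identity immediate as one pseudo instruction and
leave its materialization (typically a lo/hi pair of 32-bit moves, visible as
the s_mov_b32/s_brev_b32 pairs in the checks) to a later expansion, instead of
reasoning about which 64-bit immediates a plain s_mov_b64 can legally encode.
A standalone sketch of the lo/hi split such an expansion performs
(illustrative only, not the backend's code):

#include <cassert>
#include <cstdint>
#include <limits>

// Split a 64-bit immediate into the two 32-bit halves that a pair of
// s_mov_b32 instructions would materialize.
struct SplitImm {
  uint32_t Lo;
  uint32_t Hi;
};

SplitImm splitImm64(uint64_t Imm) {
  return {static_cast<uint32_t>(Imm & 0xffffffffu),
          static_cast<uint32_t>(Imm >> 32)};
}

int main() {
  // Identity for signed 64-bit min: INT64_MAX = 0x7fffffffffffffff.
  SplitImm S = splitImm64(std::numeric_limits<int64_t>::max());
  assert(S.Lo == 0xffffffffu); // matches "s_mov_b32 s4, -1" in the min checks
  assert(S.Hi == 0x7fffffffu); // matches "s_brev_b32 s5, -2" (bit-reverse of -2)
  return 0;
}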

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index bb3023114dfa3..8e02177bf23b2 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5324,16 +5324,12 @@ static MachineBasicBlock *Expand64BitScalarArithmetic(MachineInstr &MI,
 static uint32_t getIdentityValueFor32BitWaveReduction(unsigned Opc) {
   switch (Opc) {
   case AMDGPU::S_MIN_U32:
-  case AMDGPU::V_CMP_LT_U64_e64: // umin.u64
     return std::numeric_limits<uint32_t>::max();
   case AMDGPU::S_MIN_I32:
-  case AMDGPU::V_CMP_LT_I64_e64: // min.i64
     return std::numeric_limits<int32_t>::max();
   case AMDGPU::S_MAX_U32:
-  case AMDGPU::V_CMP_GT_U64_e64: // umax.u64
     return std::numeric_limits<uint32_t>::min();
   case AMDGPU::S_MAX_I32:
-  case AMDGPU::V_CMP_GT_I64_e64: // max.i64
     return std::numeric_limits<int32_t>::min();
   case AMDGPU::S_ADD_I32:
   case AMDGPU::S_SUB_I32:
@@ -5389,7 +5385,6 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
   bool isSGPR = TRI->isSGPRClass(MRI.getRegClass(SrcReg));
   Register DstReg = MI.getOperand(0).getReg();
   MachineBasicBlock *RetBB = nullptr;
-  bool is32BitOpc = TRI->getRegSizeInBits(*MRI.getRegClass(DstReg)) == 32;
   if (isSGPR) {
     switch (Opc) {
     case AMDGPU::S_MIN_U32:
@@ -5403,9 +5398,9 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
       RetBB = &BB;
       break;
     }
-    case AMDGPU::V_CMP_LT_U64_e64: // umin
-    case AMDGPU::V_CMP_LT_I64_e64: // min
-    case AMDGPU::V_CMP_GT_U64_e64: // umax
+    case AMDGPU::V_CMP_LT_U64_e64:   // umin
+    case AMDGPU::V_CMP_LT_I64_e64:   // min
+    case AMDGPU::V_CMP_GT_U64_e64:   // umax
     case AMDGPU::V_CMP_GT_I64_e64: { // max
       // Idempotent operations.
       BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B64), DstReg).addReg(SrcReg);
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
index ae727d4f3f713..f381a82660491 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
@@ -1270,6 +1270,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
 ; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX8DAGISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX8DAGISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
@@ -1278,6 +1279,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX8DAGISEL-NEXT:    s_mov_b32 s4, 0
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX8DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX8DAGISEL-NEXT:    s_brev_b32 s5, 1
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1301,6 +1307,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL:       ; %bb.0: ; %entry
 ; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX8GISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX8GISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
@@ -1309,6 +1316,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX8GISEL-NEXT:    s_mov_b32 s4, 0
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX8GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX8GISEL-NEXT:    s_brev_b32 s5, 1
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX8GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1332,6 +1344,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
 ; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX9DAGISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX9DAGISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
@@ -1340,6 +1353,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX9DAGISEL-NEXT:    s_mov_b32 s4, 0
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX9DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX9DAGISEL-NEXT:    s_brev_b32 s5, 1
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1363,6 +1381,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL:       ; %bb.0: ; %entry
 ; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX9GISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX9GISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
@@ -1371,6 +1390,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX9GISEL-NEXT:    s_mov_b32 s4, 0
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX9GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX9GISEL-NEXT:    s_brev_b32 s5, 1
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX9GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1394,6 +1418,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL:       ; %bb.0: ; %entry
 ; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX1064DAGISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
@@ -1402,6 +1427,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, 0
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1064DAGISEL-NEXT:    s_brev_b32 s5, 1
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1424,6 +1454,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL:       ; %bb.0: ; %entry
 ; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1064GISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX1064GISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
@@ -1432,6 +1463,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX1064GISEL-NEXT:    s_mov_b32 s4, 0
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1064GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1064GISEL-NEXT:    s_brev_b32 s5, 1
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX1064GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1454,6 +1490,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL:       ; %bb.0: ; %entry
 ; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX1032DAGISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
@@ -1462,6 +1499,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, 0
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1032DAGISEL-NEXT:    s_brev_b32 s5, 1
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1484,6 +1526,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL:       ; %bb.0: ; %entry
 ; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1032GISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX1032GISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
@@ -1492,6 +1535,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX1032GISEL-NEXT:    s_mov_b32 s4, 0
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1032GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1032GISEL-NEXT:    s_brev_b32 s5, 1
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1514,6 +1562,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
 ; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, 0
 ; GFX1164DAGISEL-NEXT:    s_brev_b32 s1, 1
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
@@ -1522,18 +1571,29 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 =======
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164DAGISEL-NEXT:    s_brev_b32 s1, 1
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1164DAGISEL-NEXT:    s_brev_b32 s1, 1
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s8
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s8
 <<<<<<< HEAD
+<<<<<<< HEAD
 =======
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164DAGISEL-NEXT:    v_cmp_gt_i64_e32 vcc, s[4:5], v[4:5]
 ; GFX1164DAGISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
 ; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s8
@@ -1550,6 +1610,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL:       ; %bb.0: ; %entry
 ; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1164GISEL-NEXT:    s_mov_b32 s0, 0
 ; GFX1164GISEL-NEXT:    s_brev_b32 s1, 1
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
@@ -1558,18 +1619,29 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 =======
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164GISEL-NEXT:    s_brev_b32 s1, 1
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1164GISEL-NEXT:    s_brev_b32 s1, 1
+; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s8
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s8
 <<<<<<< HEAD
+<<<<<<< HEAD
 =======
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164GISEL-NEXT:    v_cmp_gt_i64_e32 vcc, s[4:5], v[4:5]
 ; GFX1164GISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
 ; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s8
@@ -1586,6 +1658,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
 ; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, 0
 ; GFX1132DAGISEL-NEXT:    s_brev_b32 s1, 1
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
@@ -1594,17 +1667,28 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 =======
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:    s_brev_b32 s1, 1
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132DAGISEL-NEXT:    s_brev_b32 s1, 1
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
 ; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
 <<<<<<< HEAD
+<<<<<<< HEAD
 =======
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132DAGISEL-NEXT:    v_cmp_gt_i64_e32 vcc_lo, s[4:5], v[4:5]
 ; GFX1132DAGISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
 ; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -1620,6 +1704,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL:       ; %bb.0: ; %entry
 ; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1132GISEL-NEXT:    s_mov_b32 s0, 0
 ; GFX1132GISEL-NEXT:    s_brev_b32 s1, 1
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
@@ -1628,17 +1713,28 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 =======
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132GISEL-NEXT:    s_brev_b32 s1, 1
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132GISEL-NEXT:    s_brev_b32 s1, 1
+; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
 <<<<<<< HEAD
+<<<<<<< HEAD
 =======
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132GISEL-NEXT:    v_cmp_gt_i64_e32 vcc_lo, s[4:5], v[4:5]
 ; GFX1132GISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
 ; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
index ebfc9e433a3d8..7a83d7fa5ced2 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
@@ -1270,6 +1270,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
 ; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX8DAGISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX8DAGISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
@@ -1278,6 +1279,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX8DAGISEL-NEXT:    s_mov_b32 s4, -1
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX8DAGISEL-NEXT:    s_mov_b32 s4, -1
+; GFX8DAGISEL-NEXT:    s_brev_b32 s5, -2
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1301,6 +1307,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL:       ; %bb.0: ; %entry
 ; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX8GISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX8GISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
@@ -1309,6 +1316,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX8GISEL-NEXT:    s_mov_b32 s4, -1
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX8GISEL-NEXT:    s_mov_b32 s4, -1
+; GFX8GISEL-NEXT:    s_brev_b32 s5, -2
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX8GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1332,6 +1344,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
 ; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX9DAGISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX9DAGISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
@@ -1340,6 +1353,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX9DAGISEL-NEXT:    s_mov_b32 s4, -1
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX9DAGISEL-NEXT:    s_mov_b32 s4, -1
+; GFX9DAGISEL-NEXT:    s_brev_b32 s5, -2
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1363,6 +1381,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL:       ; %bb.0: ; %entry
 ; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX9GISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX9GISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
@@ -1371,6 +1390,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX9GISEL-NEXT:    s_mov_b32 s4, -1
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX9GISEL-NEXT:    s_mov_b32 s4, -1
+; GFX9GISEL-NEXT:    s_brev_b32 s5, -2
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX9GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1394,6 +1418,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL:       ; %bb.0: ; %entry
 ; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX1064DAGISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
@@ -1402,6 +1427,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, -1
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, -1
+; GFX1064DAGISEL-NEXT:    s_brev_b32 s5, -2
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1424,6 +1454,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL:       ; %bb.0: ; %entry
 ; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1064GISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX1064GISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
@@ -1432,6 +1463,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX1064GISEL-NEXT:    s_mov_b32 s4, -1
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1064GISEL-NEXT:    s_mov_b32 s4, -1
+; GFX1064GISEL-NEXT:    s_brev_b32 s5, -2
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX1064GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1454,6 +1490,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL:       ; %bb.0: ; %entry
 ; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX1032DAGISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
@@ -1462,6 +1499,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, -1
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, -1
+; GFX1032DAGISEL-NEXT:    s_brev_b32 s5, -2
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1484,6 +1526,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL:       ; %bb.0: ; %entry
 ; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1032GISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX1032GISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
@@ -1492,6 +1535,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX1032GISEL-NEXT:    s_mov_b32 s4, -1
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1032GISEL-NEXT:    s_mov_b32 s4, -1
+; GFX1032GISEL-NEXT:    s_brev_b32 s5, -2
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1514,6 +1562,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
 ; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, -1
 ; GFX1164DAGISEL-NEXT:    s_brev_b32 s1, -2
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
@@ -1522,18 +1571,29 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 =======
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164DAGISEL-NEXT:    s_brev_b32 s1, -2
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1164DAGISEL-NEXT:    s_brev_b32 s1, -2
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s8
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s8
 <<<<<<< HEAD
+<<<<<<< HEAD
 =======
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164DAGISEL-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[4:5]
 ; GFX1164DAGISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
 ; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s8
@@ -1550,6 +1610,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL:       ; %bb.0: ; %entry
 ; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1164GISEL-NEXT:    s_mov_b32 s0, -1
 ; GFX1164GISEL-NEXT:    s_brev_b32 s1, -2
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
@@ -1558,18 +1619,29 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 =======
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164GISEL-NEXT:    s_brev_b32 s1, -2
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164GISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1164GISEL-NEXT:    s_brev_b32 s1, -2
+; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s8
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s8
 <<<<<<< HEAD
+<<<<<<< HEAD
 =======
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164GISEL-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[4:5]
 ; GFX1164GISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
 ; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s8
@@ -1586,6 +1658,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
 ; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, -1
 ; GFX1132DAGISEL-NEXT:    s_brev_b32 s1, -2
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
@@ -1594,17 +1667,28 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 =======
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:    s_brev_b32 s1, -2
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1132DAGISEL-NEXT:    s_brev_b32 s1, -2
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
 ; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
 <<<<<<< HEAD
+<<<<<<< HEAD
 =======
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132DAGISEL-NEXT:    v_cmp_lt_i64_e32 vcc_lo, s[4:5], v[4:5]
 ; GFX1132DAGISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
 ; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -1620,6 +1704,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL:       ; %bb.0: ; %entry
 ; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1132GISEL-NEXT:    s_mov_b32 s0, -1
 ; GFX1132GISEL-NEXT:    s_brev_b32 s1, -2
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
@@ -1628,17 +1713,28 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 =======
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132GISEL-NEXT:    s_brev_b32 s1, -2
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132GISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1132GISEL-NEXT:    s_brev_b32 s1, -2
+; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
 <<<<<<< HEAD
+<<<<<<< HEAD
 =======
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132GISEL-NEXT:    v_cmp_lt_i64_e32 vcc_lo, s[4:5], v[4:5]
 ; GFX1132GISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
 ; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
index 2bcd0c2232a1b..3fc5bb103873b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
@@ -1194,6 +1194,7 @@ entry:
   ret void
 }
 
+<<<<<<< HEAD
 <<<<<<< HEAD
 define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-LABEL: divergent_value_i64:
@@ -1477,99 +1478,270 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-NEXT:    s_setpc_b64 s[30:31]
 =======
 define amdgpu_kernel void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
+=======
+define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX8DAGISEL-LABEL: divergent_value_i64:
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
-; GFX8DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s2
-; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s0
-; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s1
-; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v5, s5
+; GFX8DAGISEL-NEXT:    v_readlane_b32 s8, v2, s12
+; GFX8DAGISEL-NEXT:    v_readlane_b32 s9, v3, s12
+; GFX8DAGISEL-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX8DAGISEL-NEXT:    s_and_b64 s[10:11], vcc, s[6:7]
+; GFX8DAGISEL-NEXT:    s_bitset0_b64 s[6:7], s12
+; GFX8DAGISEL-NEXT:    s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX8DAGISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX8DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX8DAGISEL-NEXT:  ; %bb.2:
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
 ; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
-; GFX8DAGISEL-NEXT:    s_endpgm
+; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX8DAGISEL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8GISEL-LABEL: divergent_value_i64:
 ; GFX8GISEL:       ; %bb.0: ; %entry
-; GFX8GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s2
-; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
-; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s3
-; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX8GISEL-NEXT:    s_endpgm
+; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v4, s4
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v5, s5
+; GFX8GISEL-NEXT:    v_readlane_b32 s8, v2, s12
+; GFX8GISEL-NEXT:    v_readlane_b32 s9, v3, s12
+; GFX8GISEL-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX8GISEL-NEXT:    s_and_b64 s[10:11], vcc, s[6:7]
+; GFX8GISEL-NEXT:    s_bitset0_b64 s[6:7], s12
+; GFX8GISEL-NEXT:    s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX8GISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX8GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX8GISEL-NEXT:  ; %bb.2:
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX8GISEL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9DAGISEL-LABEL: divergent_value_i64:
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
-; GFX9DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
-; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
-; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
-; GFX9DAGISEL-NEXT:    s_endpgm
+; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v5, s5
+; GFX9DAGISEL-NEXT:    v_readlane_b32 s8, v2, s12
+; GFX9DAGISEL-NEXT:    v_readlane_b32 s9, v3, s12
+; GFX9DAGISEL-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX9DAGISEL-NEXT:    s_and_b64 s[10:11], vcc, s[6:7]
+; GFX9DAGISEL-NEXT:    s_bitset0_b64 s[6:7], s12
+; GFX9DAGISEL-NEXT:    s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX9DAGISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX9DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX9DAGISEL-NEXT:  ; %bb.2:
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX9DAGISEL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9GISEL-LABEL: divergent_value_i64:
 ; GFX9GISEL:       ; %bb.0: ; %entry
-; GFX9GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s2
-; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s3
-; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
-; GFX9GISEL-NEXT:    s_endpgm
+; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v4, s4
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v5, s5
+; GFX9GISEL-NEXT:    v_readlane_b32 s8, v2, s12
+; GFX9GISEL-NEXT:    v_readlane_b32 s9, v3, s12
+; GFX9GISEL-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX9GISEL-NEXT:    s_and_b64 s[10:11], vcc, s[6:7]
+; GFX9GISEL-NEXT:    s_bitset0_b64 s[6:7], s12
+; GFX9GISEL-NEXT:    s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX9GISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX9GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX9GISEL-NEXT:  ; %bb.2:
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX9GISEL-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX10DAGISEL-LABEL: divergent_value_i64:
-; GFX10DAGISEL:       ; %bb.0: ; %entry
-; GFX10DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
-; GFX10DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
-; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
-; GFX10DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
-; GFX10DAGISEL-NEXT:    s_endpgm
+; GFX1064DAGISEL-LABEL: divergent_value_i64:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v5, s5
+; GFX1064DAGISEL-NEXT:    v_readlane_b32 s8, v2, s12
+; GFX1064DAGISEL-NEXT:    v_readlane_b32 s9, v3, s12
+; GFX1064DAGISEL-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX1064DAGISEL-NEXT:    s_and_b64 s[10:11], vcc, s[6:7]
+; GFX1064DAGISEL-NEXT:    s_bitset0_b64 s[6:7], s12
+; GFX1064DAGISEL-NEXT:    s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1064DAGISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX1064DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1064DAGISEL-NEXT:  ; %bb.2:
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064DAGISEL-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX10GISEL-LABEL: divergent_value_i64:
-; GFX10GISEL:       ; %bb.0: ; %entry
-; GFX10GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10GISEL-NEXT:    v_mov_b32_e32 v2, 0
-; GFX10GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10GISEL-NEXT:    v_mov_b32_e32 v0, s2
-; GFX10GISEL-NEXT:    v_mov_b32_e32 v1, s3
-; GFX10GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
-; GFX10GISEL-NEXT:    s_endpgm
+; GFX1064GISEL-LABEL: divergent_value_i64:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v4, s4
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v5, s5
+; GFX1064GISEL-NEXT:    v_readlane_b32 s8, v2, s12
+; GFX1064GISEL-NEXT:    v_readlane_b32 s9, v3, s12
+; GFX1064GISEL-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX1064GISEL-NEXT:    s_and_b64 s[10:11], vcc, s[6:7]
+; GFX1064GISEL-NEXT:    s_bitset0_b64 s[6:7], s12
+; GFX1064GISEL-NEXT:    s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1064GISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX1064GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1064GISEL-NEXT:  ; %bb.2:
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1032DAGISEL-LABEL: divergent_value_i64:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v5, s5
+; GFX1032DAGISEL-NEXT:    v_readlane_b32 s8, v2, s7
+; GFX1032DAGISEL-NEXT:    v_readlane_b32 s9, v3, s7
+; GFX1032DAGISEL-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[8:9], v[4:5]
+; GFX1032DAGISEL-NEXT:    s_and_b32 s10, vcc_lo, s6
+; GFX1032DAGISEL-NEXT:    s_bitset0_b32 s6, s7
+; GFX1032DAGISEL-NEXT:    s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1032DAGISEL-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1032DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1032DAGISEL-NEXT:  ; %bb.2:
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1032GISEL-LABEL: divergent_value_i64:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v4, s4
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v5, s5
+; GFX1032GISEL-NEXT:    v_readlane_b32 s8, v2, s7
+; GFX1032GISEL-NEXT:    v_readlane_b32 s9, v3, s7
+; GFX1032GISEL-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[8:9], v[4:5]
+; GFX1032GISEL-NEXT:    s_and_b32 s10, vcc_lo, s6
+; GFX1032GISEL-NEXT:    s_bitset0_b32 s6, s7
+; GFX1032GISEL-NEXT:    s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1032GISEL-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1032GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1032GISEL-NEXT:  ; %bb.2:
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032GISEL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX1164DAGISEL-LABEL: divergent_value_i64:
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
-; GFX1164DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
-; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
-; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
-; GFX1164DAGISEL-NEXT:    s_endpgm
+; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v4, s0
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v5, s1
+; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s8
+; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s8
+; GFX1164DAGISEL-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[4:5]
+; GFX1164DAGISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
+; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s8
+; GFX1164DAGISEL-NEXT:    s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1164DAGISEL-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; GFX1164DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1164DAGISEL-NEXT:  ; %bb.2:
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX1164DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1164DAGISEL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX1164GISEL-LABEL: divergent_value_i64:
 ; GFX1164GISEL:       ; %bb.0: ; %entry
-; GFX1164GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s2
-; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s3
-; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
-; GFX1164GISEL-NEXT:    s_endpgm
+; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], 0
+; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v4, s0
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v5, s1
+; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s8
+; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s8
+; GFX1164GISEL-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[4:5]
+; GFX1164GISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
+; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s8
+; GFX1164GISEL-NEXT:    s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1164GISEL-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; GFX1164GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1164GISEL-NEXT:  ; %bb.2:
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX1164GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1164GISEL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX1132DAGISEL-LABEL: divergent_value_i64:
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
-; GFX1132DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
-; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
-; GFX1132DAGISEL-NEXT:    s_endpgm
+; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
+; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
+; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
+; GFX1132DAGISEL-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[4:5], v[4:5]
+; GFX1132DAGISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
+; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
+; GFX1132DAGISEL-NEXT:    s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1132DAGISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX1132DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1132DAGISEL-NEXT:  ; %bb.2:
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1132DAGISEL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX1132GISEL-LABEL: divergent_value_i64:
 ; GFX1132GISEL:       ; %bb.0: ; %entry
+<<<<<<< HEAD
 ; GFX1132GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
 ; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
@@ -1577,6 +1749,27 @@ define amdgpu_kernel void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x)
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], 0
+; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
+; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
+; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
+; GFX1132GISEL-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[4:5], v[4:5]
+; GFX1132GISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
+; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
+; GFX1132GISEL-NEXT:    s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1132GISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX1132GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1132GISEL-NEXT:  ; %bb.2:
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1132GISEL-NEXT:    s_setpc_b64 s[30:31]
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.umax.i64(i64 %id.x, i32 1)
   store i64 %result, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
index e641eeec4353c..29412bc1ed31d 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
@@ -1199,6 +1199,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
 ; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 =======
@@ -1206,6 +1207,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX8DAGISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1229,6 +1234,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL:       ; %bb.0: ; %entry
 ; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
 =======
@@ -1236,6 +1242,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX8GISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], -1
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX8GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1259,6 +1269,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
 ; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 =======
@@ -1266,6 +1277,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX9DAGISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1289,6 +1304,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL:       ; %bb.0: ; %entry
 ; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
 =======
@@ -1296,6 +1312,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX9GISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], -1
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX9GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1319,6 +1339,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL:       ; %bb.0: ; %entry
 ; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 =======
@@ -1326,6 +1347,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX1064DAGISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1348,6 +1373,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL:       ; %bb.0: ; %entry
 ; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
 =======
@@ -1355,6 +1381,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX1064GISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], -1
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX1064GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1377,6 +1407,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL:       ; %bb.0: ; %entry
 ; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
 =======
@@ -1384,6 +1415,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1406,6 +1441,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL:       ; %bb.0: ; %entry
 ; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
 =======
@@ -1413,6 +1449,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
 ; GFX1032GISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], -1
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1435,25 +1475,35 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
 ; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], -1
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
 =======
 ; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, -1
+=======
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], -1
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s8
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s8
 <<<<<<< HEAD
+<<<<<<< HEAD
 =======
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164DAGISEL-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[4:5]
 ; GFX1164DAGISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
 ; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s8
@@ -1470,25 +1520,35 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL:       ; %bb.0: ; %entry
 ; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], -1
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
 =======
 ; GFX1164GISEL-NEXT:    s_mov_b32 s0, -1
+=======
+; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], -1
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s8
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s8
 <<<<<<< HEAD
+<<<<<<< HEAD
 =======
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164GISEL-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[4:5]
 ; GFX1164GISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
 ; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s8
@@ -1505,24 +1565,34 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
 ; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], -1
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
 =======
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, -1
+=======
+; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], -1
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
 ; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
 <<<<<<< HEAD
+<<<<<<< HEAD
 =======
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132DAGISEL-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[4:5], v[4:5]
 ; GFX1132DAGISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
 ; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -1538,24 +1608,34 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL:       ; %bb.0: ; %entry
 ; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], -1
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
 =======
 ; GFX1132GISEL-NEXT:    s_mov_b32 s0, -1
+=======
+; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], -1
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
 <<<<<<< HEAD
+<<<<<<< HEAD
 =======
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 >>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132GISEL-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[4:5], v[4:5]
 ; GFX1132GISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
 ; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3

>From 82095acd811c75266e1cee96fadfb153283fb16c Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Sat, 19 Jul 2025 12:48:18 +0530
Subject: [PATCH 04/12] [AMDGPU] Extending wave reduction intrinsics for `i64`
 types - 2

Supporting Arithmetic Operations: `add`, `sub`
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  2 +
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll  | 99 +++++++++++++++++++
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll  | 99 +++++++++++++++++++
 3 files changed, 200 insertions(+)
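
For readers following the CHECK lines below: the divergent_value_i64 outputs all implement the same per-lane loop (find the lowest set bit of the saved exec mask, read both 32-bit halves of that lane's value, accumulate, clear the bit). A minimal host-side C++ model of that loop for the `add` case — not part of the patch; the function name and parameters are illustrative only:

#include <cstdint>

// Scalar model of the generated reduction loop for a divergent i64 add.
// ValueLo/ValueHi stand in for the two VGPRs (v2/v3), Exec for the saved
// exec mask (s[6:7] / s[2:3] in the listings below).
static uint64_t waveReduceAdd64(const uint32_t *ValueLo,
                                const uint32_t *ValueHi, uint64_t Exec) {
  uint64_t Acc = 0;                                      // identity: s_mov_b64 s[4:5], 0
  while (Exec) {
    unsigned Lane = __builtin_ctzll(Exec);               // s_ff1_i32_b64 / s_ctz_i32_b64
    uint64_t LaneVal =
        ((uint64_t)ValueHi[Lane] << 32) | ValueLo[Lane]; // v_readlane_b32 x2
    Acc += LaneVal;                                      // 64-bit accumulate
    Exec &= Exec - 1;                                    // s_bitset0_b64
  }
  return Acc;
}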

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 8e02177bf23b2..b649a789a2aef 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5332,7 +5332,9 @@ static uint32_t getIdentityValueFor32BitWaveReduction(unsigned Opc) {
   case AMDGPU::S_MAX_I32:
     return std::numeric_limits<int32_t>::min();
   case AMDGPU::S_ADD_I32:
+  case AMDGPU::S_ADD_U64_PSEUDO:
   case AMDGPU::S_SUB_I32:
+  case AMDGPU::S_SUB_U64_PSEUDO:
   case AMDGPU::S_OR_B32:
   case AMDGPU::S_XOR_B32:
     return std::numeric_limits<uint32_t>::min();
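
The 64-bit pseudos reuse the 32-bit identity (0 for add/sub). A hedged sketch of how that identity could be materialized into an SGPR pair before the loop, using the S_MOV_B64_IMM_PSEUDO named in the later fixup commit; register names and placement are illustrative, not the exact in-tree code:

  // Zero-extend the 32-bit identity into a 64-bit scalar register pair.
  uint64_t Identity = getIdentityValueFor32BitWaveReduction(Opc); // 0 for add/sub
  Register IdentityReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B64_IMM_PSEUDO), IdentityReg)
      .addImm(Identity);
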
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
index 5f303e30c923b..ab2475899ff45 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
@@ -1523,6 +1523,7 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
+<<<<<<< HEAD
 ;
 ; GFX12DAGISEL-LABEL: uniform_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -1539,6 +1540,8 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
+=======
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 %in, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -1741,6 +1744,7 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
+<<<<<<< HEAD
 ;
 ; GFX12DAGISEL-LABEL: const_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -1757,6 +1761,8 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX12DAGISEL-NEXT:    s_wait_kmcnt 0x0
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
+=======
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 123, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -1957,6 +1963,7 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
+<<<<<<< HEAD
 ;
 ; GFX12DAGISEL-LABEL: poison_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -1973,6 +1980,8 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
+=======
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 poison, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -1983,8 +1992,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-LABEL: divergent_value_i64:
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
 ; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX8DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX8DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2004,8 +2019,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL-LABEL: divergent_value_i64:
 ; GFX8GISEL:       ; %bb.0: ; %entry
 ; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX8GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX8GISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2025,8 +2046,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL-LABEL: divergent_value_i64:
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
 ; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX9DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX9DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2046,8 +2073,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL-LABEL: divergent_value_i64:
 ; GFX9GISEL:       ; %bb.0: ; %entry
 ; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX9GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX9GISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2067,8 +2100,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL-LABEL: divergent_value_i64:
 ; GFX1064DAGISEL:       ; %bb.0: ; %entry
 ; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX1064DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2087,8 +2126,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL-LABEL: divergent_value_i64:
 ; GFX1064GISEL:       ; %bb.0: ; %entry
 ; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX1064GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX1064GISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2107,8 +2152,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL-LABEL: divergent_value_i64:
 ; GFX1032DAGISEL:       ; %bb.0: ; %entry
 ; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+=======
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032DAGISEL-NEXT:    v_readlane_b32 s8, v2, s7
@@ -2127,8 +2178,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL-LABEL: divergent_value_i64:
 ; GFX1032GISEL:       ; %bb.0: ; %entry
 ; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+=======
+; GFX1032GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032GISEL-NEXT:    v_readlane_b32 s8, v2, s7
@@ -2147,11 +2204,20 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL-LABEL: divergent_value_i64:
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
 ; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
+=======
+; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v2, s4
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s6, v3, s4
 ; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s4
@@ -2168,11 +2234,20 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL-LABEL: divergent_value_i64:
 ; GFX1164GISEL:       ; %bb.0: ; %entry
 ; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
+=======
+; GFX1164GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v2, s4
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s6, v3, s4
 ; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s4
@@ -2189,11 +2264,20 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL-LABEL: divergent_value_i64:
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
 ; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
+=======
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
 ; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -2209,11 +2293,20 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-LABEL: divergent_value_i64:
 ; GFX1132GISEL:       ; %bb.0: ; %entry
 ; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
+=======
+; GFX1132GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
 ; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -2225,6 +2318,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
 ; GFX1132GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX1132GISEL-NEXT:    s_setpc_b64 s[30:31]
+<<<<<<< HEAD
 ;
 ; GFX12DAGISEL-LABEL: divergent_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -2251,6 +2345,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
 ; GFX12DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX12DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+=======
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 %id.x, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -2728,6 +2824,7 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
+<<<<<<< HEAD
 ;
 ; GFX12DAGISEL-LABEL: divergent_cfg_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -2771,6 +2868,8 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX12DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
+=======
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 entry:
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %d_cmp = icmp ult i32 %tid, 16
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
index bc8bf7f4b3b6f..a940a5079864b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
@@ -1629,6 +1629,7 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
+<<<<<<< HEAD
 ;
 ; GFX12DAGISEL-LABEL: uniform_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -1650,6 +1651,8 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
+=======
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 %in, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -1902,6 +1905,7 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
+<<<<<<< HEAD
 ;
 ; GFX12DAGISEL-LABEL: const_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -1923,6 +1927,8 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX12DAGISEL-NEXT:    s_wait_kmcnt 0x0
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
+=======
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 123, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -2175,6 +2181,7 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
+<<<<<<< HEAD
 ;
 ; GFX12DAGISEL-LABEL: poison_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -2196,6 +2203,8 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
+=======
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 poison, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -2206,8 +2215,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-LABEL: divergent_value_i64:
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
 ; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX8DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX8DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2227,8 +2242,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL-LABEL: divergent_value_i64:
 ; GFX8GISEL:       ; %bb.0: ; %entry
 ; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX8GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX8GISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2248,8 +2269,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL-LABEL: divergent_value_i64:
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
 ; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX9DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX9DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2269,8 +2296,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL-LABEL: divergent_value_i64:
 ; GFX9GISEL:       ; %bb.0: ; %entry
 ; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX9GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX9GISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2290,8 +2323,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL-LABEL: divergent_value_i64:
 ; GFX1064DAGISEL:       ; %bb.0: ; %entry
 ; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX1064DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2310,8 +2349,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL-LABEL: divergent_value_i64:
 ; GFX1064GISEL:       ; %bb.0: ; %entry
 ; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+=======
+; GFX1064GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX1064GISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2330,8 +2375,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL-LABEL: divergent_value_i64:
 ; GFX1032DAGISEL:       ; %bb.0: ; %entry
 ; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+=======
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032DAGISEL-NEXT:    v_readlane_b32 s8, v2, s7
@@ -2350,8 +2401,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL-LABEL: divergent_value_i64:
 ; GFX1032GISEL:       ; %bb.0: ; %entry
 ; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+=======
+; GFX1032GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT:    s_mov_b32 s5, s4
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032GISEL-NEXT:    v_readlane_b32 s8, v2, s7
@@ -2370,11 +2427,20 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL-LABEL: divergent_value_i64:
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
 ; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
+=======
+; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v2, s4
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s6, v3, s4
 ; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s4
@@ -2391,11 +2457,20 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL-LABEL: divergent_value_i64:
 ; GFX1164GISEL:       ; %bb.0: ; %entry
 ; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
+=======
+; GFX1164GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v2, s4
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s6, v3, s4
 ; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s4
@@ -2412,11 +2487,20 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL-LABEL: divergent_value_i64:
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
 ; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
+=======
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
 ; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -2432,11 +2516,20 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-LABEL: divergent_value_i64:
 ; GFX1132GISEL:       ; %bb.0: ; %entry
 ; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
 ; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
+=======
+; GFX1132GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
 ; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -2448,6 +2541,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
 ; GFX1132GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX1132GISEL-NEXT:    s_setpc_b64 s[30:31]
+<<<<<<< HEAD
 ;
 ; GFX12DAGISEL-LABEL: divergent_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -2474,6 +2568,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
 ; GFX12DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX12DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+=======
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 %id.x, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -3056,6 +3152,7 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
+<<<<<<< HEAD
 ;
 ; GFX12DAGISEL-LABEL: divergent_cfg_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -3110,6 +3207,8 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX12DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
+=======
+>>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
 entry:
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %d_cmp = icmp ult i32 %tid, 16

>From bc4afc9ebafb271bffcd03751d2ff8701fa60544 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Wed, 13 Aug 2025 18:22:54 +0530
Subject: [PATCH 05/12] Marking dead scc

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  2 -
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll  | 76 ++++++++++++++++++-
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll  | 76 ++++++++++++++++++-
 3 files changed, 144 insertions(+), 10 deletions(-)
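
For context on the subject line: the scalar ALU instructions built inside the reduction loop define SCC implicitly, and when that flag is never read the definition can be flagged dead so later passes do not treat it as live. A hedged sketch of that marking — the opcode, register names, and operand index are illustrative placeholders, not the exact in-tree change:

  // Accumulate the low half; the implicit-def $scc produced here is unused.
  MachineInstr *LoAdd =
      BuildMI(BB, MI, DL, TII->get(AMDGPU::S_ADD_U32), DstLoReg)
          .addReg(AccLoReg)
          .addReg(LaneLoReg);
  // Operand 3 is the implicit SCC def of S_ADD_U32; mark it dead.
  LoAdd->getOperand(3).setIsDead();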

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index b649a789a2aef..8e02177bf23b2 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5332,9 +5332,7 @@ static uint32_t getIdentityValueFor32BitWaveReduction(unsigned Opc) {
   case AMDGPU::S_MAX_I32:
     return std::numeric_limits<int32_t>::min();
   case AMDGPU::S_ADD_I32:
-  case AMDGPU::S_ADD_U64_PSEUDO:
   case AMDGPU::S_SUB_I32:
-  case AMDGPU::S_SUB_U64_PSEUDO:
   case AMDGPU::S_OR_B32:
   case AMDGPU::S_XOR_B32:
     return std::numeric_limits<uint32_t>::min();
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
index ab2475899ff45..41059e4edc30c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
@@ -1993,6 +1993,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
 ; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 =======
@@ -2000,6 +2001,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX8DAGISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX8DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2020,6 +2025,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL:       ; %bb.0: ; %entry
 ; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
 =======
@@ -2027,6 +2033,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX8GISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX8GISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2047,6 +2057,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
 ; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 =======
@@ -2054,6 +2065,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX9DAGISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX9DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2074,6 +2089,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL:       ; %bb.0: ; %entry
 ; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
 =======
@@ -2081,6 +2097,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX9GISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX9GISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2101,6 +2121,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL:       ; %bb.0: ; %entry
 ; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 =======
@@ -2108,6 +2129,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX1064DAGISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX1064DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2127,6 +2152,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL:       ; %bb.0: ; %entry
 ; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
 =======
@@ -2134,6 +2160,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX1064GISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX1064GISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2153,6 +2183,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL:       ; %bb.0: ; %entry
 ; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
 =======
@@ -2160,6 +2191,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032DAGISEL-NEXT:    v_readlane_b32 s8, v2, s7
@@ -2179,6 +2214,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL:       ; %bb.0: ; %entry
 ; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
 =======
@@ -2186,6 +2222,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
 ; GFX1032GISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032GISEL-NEXT:    v_readlane_b32 s8, v2, s7
@@ -2205,6 +2245,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
 ; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
@@ -2212,12 +2253,18 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
 =======
 ; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, 0
+=======
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
+<<<<<<< HEAD
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v2, s4
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s6, v3, s4
 ; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s4
@@ -2235,6 +2282,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL:       ; %bb.0: ; %entry
 ; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
@@ -2242,12 +2290,18 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
 =======
 ; GFX1164GISEL-NEXT:    s_mov_b32 s0, 0
+=======
+; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], 0
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
+<<<<<<< HEAD
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v2, s4
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s6, v3, s4
 ; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s4
@@ -2265,6 +2319,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
 ; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
@@ -2272,12 +2327,18 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
 =======
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, 0
+=======
+; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
+<<<<<<< HEAD
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
 ; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -2294,6 +2355,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL:       ; %bb.0: ; %entry
 ; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
@@ -2301,12 +2363,18 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
 =======
 ; GFX1132GISEL-NEXT:    s_mov_b32 s0, 0
+=======
+; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], 0
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
+<<<<<<< HEAD
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
 ; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
index a940a5079864b..156bdd00c32ae 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
@@ -2216,6 +2216,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
 ; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 =======
@@ -2223,6 +2224,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX8DAGISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX8DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2243,6 +2248,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL:       ; %bb.0: ; %entry
 ; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
 =======
@@ -2250,6 +2256,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX8GISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX8GISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2270,6 +2280,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
 ; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 =======
@@ -2277,6 +2288,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX9DAGISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX9DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2297,6 +2312,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL:       ; %bb.0: ; %entry
 ; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
 =======
@@ -2304,6 +2320,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX9GISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX9GISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2324,6 +2344,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL:       ; %bb.0: ; %entry
 ; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 =======
@@ -2331,6 +2352,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX1064DAGISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX1064DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2350,6 +2375,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL:       ; %bb.0: ; %entry
 ; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
 =======
@@ -2357,6 +2383,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX1064GISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX1064GISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2376,6 +2406,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL:       ; %bb.0: ; %entry
 ; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
 =======
@@ -2383,6 +2414,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032DAGISEL-NEXT:    v_readlane_b32 s8, v2, s7
@@ -2402,6 +2437,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL:       ; %bb.0: ; %entry
 ; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
 =======
@@ -2409,6 +2445,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
 ; GFX1032GISEL-NEXT:    s_mov_b32 s5, s4
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], 0
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032GISEL-NEXT:    v_readlane_b32 s8, v2, s7
@@ -2428,6 +2468,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
 ; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
@@ -2435,12 +2476,18 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
 =======
 ; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, 0
+=======
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
+<<<<<<< HEAD
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v2, s4
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s6, v3, s4
 ; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s4
@@ -2458,6 +2505,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL:       ; %bb.0: ; %entry
 ; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
@@ -2465,12 +2513,18 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
 =======
 ; GFX1164GISEL-NEXT:    s_mov_b32 s0, 0
+=======
+; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], 0
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
+<<<<<<< HEAD
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v2, s4
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s6, v3, s4
 ; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s4
@@ -2488,6 +2542,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
 ; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
@@ -2495,12 +2550,18 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
 =======
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, 0
+=======
+; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
+<<<<<<< HEAD
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
 ; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -2517,6 +2578,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL:       ; %bb.0: ; %entry
 ; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 <<<<<<< HEAD
+<<<<<<< HEAD
 ; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
@@ -2524,12 +2586,18 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
 =======
 ; GFX1132GISEL-NEXT:    s_mov_b32 s0, 0
+=======
+; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], 0
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
+<<<<<<< HEAD
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+>>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
 ; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3

>From 634c7e198fbc093b077232939180e8551a743f41 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Wed, 20 Aug 2025 14:55:18 +0530
Subject: [PATCH 06/12] Checking for targets with native 64-bit `add`/`sub`
 support

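The i64 `add`/`sub` expansion can use a native 64-bit scalar instruction where
the target provides one, and otherwise falls back to a 32-bit lo/hi carry
chain; the new GFX12 check lines cover the native case. A hedged sketch of the
check involved (`GCNSubtarget::hasScalarAddSub64()` is assumed here, not added
by this patch):

  #include "GCNSubtarget.h"

  // Assumed hook: true on targets (e.g. GFX12-class) with native 64-bit
  // scalar add/sub, where the i64 wave-reduce add/sub need not be split
  // into a 32-bit lo/hi pair with an explicit carry.
  static bool useNativeScalarAddSub64(const llvm::GCNSubtarget &ST) {
    return ST.hasScalarAddSub64();
  }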
---
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll  | 30 +++++++++++++++++++
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll  | 30 +++++++++++++++++++
 2 files changed, 60 insertions(+)

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
index 41059e4edc30c..ac84c71f5b103 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
@@ -1524,6 +1524,9 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
 <<<<<<< HEAD
+<<<<<<< HEAD
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: uniform_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -1540,8 +1543,11 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
+<<<<<<< HEAD
 =======
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 %in, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -1745,6 +1751,9 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
 <<<<<<< HEAD
+<<<<<<< HEAD
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: const_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -1761,8 +1770,11 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX12DAGISEL-NEXT:    s_wait_kmcnt 0x0
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
+<<<<<<< HEAD
 =======
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 123, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -1964,6 +1976,9 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
 <<<<<<< HEAD
+<<<<<<< HEAD
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: poison_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -1980,8 +1995,11 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
+<<<<<<< HEAD
 =======
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 poison, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -2387,6 +2405,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX1132GISEL-NEXT:    s_setpc_b64 s[30:31]
 <<<<<<< HEAD
+<<<<<<< HEAD
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: divergent_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -2413,8 +2434,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
 ; GFX12DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX12DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+<<<<<<< HEAD
 =======
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 %id.x, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -2893,6 +2917,9 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
 <<<<<<< HEAD
+<<<<<<< HEAD
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: divergent_cfg_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -2936,8 +2963,11 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX12DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
+<<<<<<< HEAD
 =======
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %d_cmp = icmp ult i32 %tid, 16
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
index 156bdd00c32ae..88637bcf882a7 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
@@ -1630,6 +1630,9 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
 <<<<<<< HEAD
+<<<<<<< HEAD
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: uniform_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -1651,8 +1654,11 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
+<<<<<<< HEAD
 =======
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 %in, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -1906,6 +1912,9 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
 <<<<<<< HEAD
+<<<<<<< HEAD
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: const_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -1927,8 +1936,11 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX12DAGISEL-NEXT:    s_wait_kmcnt 0x0
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
+<<<<<<< HEAD
 =======
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 123, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -2182,6 +2194,9 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
 <<<<<<< HEAD
+<<<<<<< HEAD
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: poison_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -2203,8 +2218,11 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
+<<<<<<< HEAD
 =======
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 poison, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -2610,6 +2628,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX1132GISEL-NEXT:    s_setpc_b64 s[30:31]
 <<<<<<< HEAD
+<<<<<<< HEAD
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: divergent_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -2636,8 +2657,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
 ; GFX12DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX12DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+<<<<<<< HEAD
 =======
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 %id.x, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -3221,6 +3245,9 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
 <<<<<<< HEAD
+<<<<<<< HEAD
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: divergent_cfg_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -3275,8 +3302,11 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX12DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
+<<<<<<< HEAD
 =======
 >>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
+=======
+>>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %d_cmp = icmp ult i32 %tid, 16

>From e1760a2867c6420d5d252a966f469d174a4fca69 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Sat, 19 Jul 2025 12:48:18 +0530
Subject: [PATCH 07/12] [AMDGPU] Extending wave reduction intrinsics for `i64`
 types - 3

Supporting Bitwise Operations: `and`, `or`, `xor`
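
The expansion seeds its accumulator with the identity element of the operation
(all-ones for `and`, zero for `or`/`xor`) and folds in one lane value per
iteration of the readlane loop; for a uniform input, `and`/`or` are idempotent
and lower to a plain copy, while `xor` depends only on the parity of the
active-lane count. A rough scalar model of the divergent path, for
illustration only (the names below are not from this patch):

  #include <cstdint>
  #include <vector>

  // Illustrative only: fold the active lanes' values starting from the
  // operation's identity element, mirroring the v_readlane +
  // s_and_b64/s_or_b64/s_xor_b64 accumulation loop.
  uint64_t waveReduceAnd64(const std::vector<uint64_t> &ActiveLaneValues) {
    uint64_t Acc = ~0ULL; // identity for `and`; 0 seeds `or`/`xor`
    for (uint64_t Val : ActiveLaneValues)
      Acc &= Val;
    return Acc;
  }
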
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  104 +-
 llvm/lib/Target/AMDGPU/SIInstructions.td      |    3 +
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.and.ll  |  854 ++++++++++
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.or.ll   |  855 ++++++++++
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll  | 1413 +++++++++++++++++
 5 files changed, 3210 insertions(+), 19 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 8e02177bf23b2..d506b42a6e90a 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5334,9 +5334,12 @@ static uint32_t getIdentityValueFor32BitWaveReduction(unsigned Opc) {
   case AMDGPU::S_ADD_I32:
   case AMDGPU::S_SUB_I32:
   case AMDGPU::S_OR_B32:
+  case AMDGPU::S_OR_B64:
   case AMDGPU::S_XOR_B32:
+  case AMDGPU::S_XOR_B64:
     return std::numeric_limits<uint32_t>::min();
   case AMDGPU::S_AND_B32:
+  case AMDGPU::S_AND_B64:
     return std::numeric_limits<uint32_t>::max();
   default:
     llvm_unreachable(
@@ -5392,7 +5395,9 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
     case AMDGPU::S_MAX_U32:
     case AMDGPU::S_MAX_I32:
     case AMDGPU::S_AND_B32:
-    case AMDGPU::S_OR_B32: {
+    case AMDGPU::S_AND_B64:
+    case AMDGPU::S_OR_B32:
+    case AMDGPU::S_OR_B64: {
       // Idempotent operations.
       BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B32), DstReg).addReg(SrcReg);
       RetBB = &BB;
@@ -5417,6 +5422,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
       break;
     }
     case AMDGPU::S_XOR_B32:
+    case AMDGPU::S_XOR_B64:
     case AMDGPU::S_ADD_I32:
     case AMDGPU::S_ADD_U64_PSEUDO:
     case AMDGPU::S_SUB_I32:
@@ -5439,24 +5445,69 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
           BuildMI(BB, MI, DL, TII->get(BitCountOpc), NumActiveLanes)
               .addReg(ExecMask);
 
-      switch (Opc) {
-      case AMDGPU::S_XOR_B32: {
-        // Performing an XOR operation on a uniform value
-        // depends on the parity of the number of active lanes.
-        // For even parity, the result will be 0, for odd
-        // parity the result will be the same as the input value.
-        Register ParityRegister =
-            MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
-
-        BuildMI(BB, MI, DL, TII->get(AMDGPU::S_AND_B32), ParityRegister)
-            .addReg(NewAccumulator->getOperand(0).getReg())
-            .addImm(1)
-            .setOperandDead(3); // Dead scc
-        BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DstReg)
-            .addReg(SrcReg)
-            .addReg(ParityRegister);
-        break;
-      }
+      switch (Opc) {
+      case AMDGPU::S_XOR_B32:
+      case AMDGPU::S_XOR_B64: {
+        // Performing an XOR operation on a uniform value
+        // depends on the parity of the number of active lanes.
+        // For even parity, the result will be 0, for odd
+        // parity the result will be the same as the input value.
+        Register ParityRegister =
+            MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+
+        BuildMI(BB, MI, DL, TII->get(AMDGPU::S_AND_B32), ParityRegister)
+            .addReg(NewAccumulator->getOperand(0).getReg())
+            .addImm(1)
+            .setOperandDead(3); // Dead scc
+        if (is32BitOpc) {
+          BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DstReg)
+              .addReg(SrcReg)
+              .addReg(ParityRegister);
+          break;
+        } else {
+          Register DestSub0 =
+              MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+          Register DestSub1 =
+              MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+          Register Op1H_Op0L_Reg =
+              MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+          Register CarryReg =
+              MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+
+          const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
+          const TargetRegisterClass *SrcSubRC =
+              TRI->getSubRegisterClass(SrcRC, AMDGPU::sub0);
+
+          MachineOperand Op1L = TII->buildExtractSubRegOrImm(
+              MI, MRI, MI.getOperand(1), SrcRC, AMDGPU::sub0, SrcSubRC);
+          MachineOperand Op1H = TII->buildExtractSubRegOrImm(
+              MI, MRI, MI.getOperand(1), SrcRC, AMDGPU::sub1, SrcSubRC);
+
+          BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DestSub0)
+              .add(Op1L)
+              .addReg(ParityRegister);
+
+          BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), Op1H_Op0L_Reg)
+              .add(Op1H)
+              .addReg(ParityRegister);
+
+          BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_HI_U32), CarryReg)
+              .add(Op1L)
+              .addReg(ParityRegister);
+
+          BuildMI(BB, MI, DL, TII->get(AMDGPU::S_ADD_U32), DestSub1)
+              .addReg(CarryReg)
+              .addReg(Op1H_Op0L_Reg)
+              .setOperandDead(3); // Dead scc
+
+          BuildMI(BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), DstReg)
+              .addReg(DestSub0)
+              .addImm(AMDGPU::sub0)
+              .addReg(DestSub1)
+              .addImm(AMDGPU::sub1);
+          break;
+        }
+      }
       case AMDGPU::S_SUB_I32: {
         Register NegatedVal = MRI.createVirtualRegister(DstRegClass);
 
@@ -5652,6 +5703,15 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
                            .addReg(LaneValueHiReg)
                            .addImm(AMDGPU::sub1);
       switch (Opc) {
+      case AMDGPU::S_OR_B64:
+      case AMDGPU::S_AND_B64:
+      case AMDGPU::S_XOR_B64: {
+        NewAccumulator = BuildMI(*ComputeLoop, I, DL, TII->get(Opc), DstReg)
+                             .addReg(Accumulator->getOperand(0).getReg())
+                             .addReg(LaneValue->getOperand(0).getReg())
+                             .setOperandDead(3); // Dead scc
+        break;
+      }
       case AMDGPU::V_CMP_GT_I64_e64:
       case AMDGPU::V_CMP_GT_U64_e64:
       case AMDGPU::V_CMP_LT_I64_e64:
@@ -5760,10 +5820,16 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_SUB_U64_PSEUDO);
   case AMDGPU::WAVE_REDUCE_AND_PSEUDO_B32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_AND_B32);
+  case AMDGPU::WAVE_REDUCE_AND_PSEUDO_B64:
+    return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_AND_B64);
   case AMDGPU::WAVE_REDUCE_OR_PSEUDO_B32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_OR_B32);
+  case AMDGPU::WAVE_REDUCE_OR_PSEUDO_B64:
+    return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_OR_B64);
   case AMDGPU::WAVE_REDUCE_XOR_PSEUDO_B32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_XOR_B32);
+  case AMDGPU::WAVE_REDUCE_XOR_PSEUDO_B64:
+    return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_XOR_B64);
   case AMDGPU::S_UADDO_PSEUDO:
   case AMDGPU::S_USUBO_PSEUDO: {
     const DebugLoc &DL = MI.getDebugLoc();
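
An illustrative aside (not part of the patch; the function name below is made
up): the uniform-value `xor` case above computes "input times the parity of
the active-lane count", splitting the 64 x 32-bit multiply into halves. In
scalar terms, the emitted S_MUL_I32 / S_MUL_HI_U32 / S_ADD_U32 / REG_SEQUENCE
chain amounts to:

  #include <cstdint>

  // Scalar model, illustrative only.
  uint64_t xorReduceUniform64(uint64_t Src, uint64_t ExecMask) {
    // Bit-count of exec, then AND with 1 (S_AND_B32 ..., 1).
    uint32_t Parity = __builtin_popcountll(ExecMask) & 1u;
    uint32_t SrcLo = static_cast<uint32_t>(Src);       // sub0
    uint32_t SrcHi = static_cast<uint32_t>(Src >> 32); // sub1
    uint32_t DstLo = SrcLo * Parity;                   // S_MUL_I32
    uint32_t Carry =                                   // S_MUL_HI_U32
        static_cast<uint32_t>((uint64_t(SrcLo) * Parity) >> 32);
    uint32_t DstHi = SrcHi * Parity + Carry;           // S_MUL_I32 + S_ADD_U32
    return (uint64_t(DstHi) << 32) | DstLo;            // REG_SEQUENCE
  }
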
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index d6fe587e081cf..1f7951258c219 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -369,6 +369,9 @@ defvar Operations = [
   WaveReduceOp<"max", "I64", i64, SGPR_64, VSrc_b64>,
   WaveReduceOp<"add", "U64", i64, SGPR_64, VSrc_b64>,
   WaveReduceOp<"sub", "U64", i64, SGPR_64, VSrc_b64>,
+  WaveReduceOp<"and", "B64", i64, SGPR_64, VSrc_b64>,
+  WaveReduceOp<"or", "B64", i64, SGPR_64, VSrc_b64>,
+  WaveReduceOp<"xor", "B64", i64, SGPR_64, VSrc_b64>,
 ];
 
 foreach Op = Operations in {
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.and.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.and.ll
index 356b0e73b39e7..55e6189f65675 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.and.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.and.ll
@@ -980,3 +980,857 @@ endif:
   store i32 %combine, ptr addrspace(1) %out
   ret void
 }
+
+define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: uniform_value_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: uniform_value_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: uniform_value_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: uniform_value_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX10DAGISEL-LABEL: uniform_value_i64:
+; GFX10DAGISEL:       ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX10DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX10DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT:    s_endpgm
+;
+; GFX10GISEL-LABEL: uniform_value_i64:
+; GFX10GISEL:       ; %bb.0: ; %entry
+; GFX10GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX10GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX10GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT:    s_endpgm
+;
+; GFX1164DAGISEL-LABEL: uniform_value_i64:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT:    s_endpgm
+;
+; GFX1164GISEL-LABEL: uniform_value_i64:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT:    s_endpgm
+;
+; GFX1132DAGISEL-LABEL: uniform_value_i64:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT:    s_endpgm
+;
+; GFX1132GISEL-LABEL: uniform_value_i64:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT:    s_endpgm
+entry:
+  %result = call i64 @llvm.amdgcn.wave.reduce.and.i64(i64 %in, i32 1)
+  store i64 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
+; GFX8DAGISEL-LABEL: const_value_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: const_value_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: const_value_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: const_value_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX10DAGISEL-LABEL: const_value_i64:
+; GFX10DAGISEL:       ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX10DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT:    s_endpgm
+;
+; GFX10GISEL-LABEL: const_value_i64:
+; GFX10GISEL:       ; %bb.0: ; %entry
+; GFX10GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10GISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX10GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX10GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT:    s_endpgm
+;
+; GFX1164DAGISEL-LABEL: const_value_i64:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT:    s_endpgm
+;
+; GFX1164GISEL-LABEL: const_value_i64:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT:    s_endpgm
+;
+; GFX1132DAGISEL-LABEL: const_value_i64:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT:    s_endpgm
+;
+; GFX1132GISEL-LABEL: const_value_i64:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132GISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT:    s_endpgm
+entry:
+  %result = call i64 @llvm.amdgcn.wave.reduce.and.i64(i64 123, i32 1)
+  store i64 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: poison_value_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[0:1]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: poison_value_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[0:1], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: poison_value_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: poison_value_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX10DAGISEL-LABEL: poison_value_i64:
+; GFX10DAGISEL:       ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX10DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT:    global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT:    s_endpgm
+;
+; GFX10GISEL-LABEL: poison_value_i64:
+; GFX10GISEL:       ; %bb.0: ; %entry
+; GFX10GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10GISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX10GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT:    global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX10GISEL-NEXT:    s_endpgm
+;
+; GFX11DAGISEL-LABEL: poison_value_i64:
+; GFX11DAGISEL:       ; %bb.0: ; %entry
+; GFX11DAGISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX11DAGISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX11DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11DAGISEL-NEXT:    global_store_b64 v0, v[0:1], s[0:1]
+; GFX11DAGISEL-NEXT:    s_endpgm
+;
+; GFX11GISEL-LABEL: poison_value_i64:
+; GFX11GISEL:       ; %bb.0: ; %entry
+; GFX11GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX11GISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX11GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11GISEL-NEXT:    global_store_b64 v0, v[0:1], s[0:1]
+; GFX11GISEL-NEXT:    s_endpgm
+entry:
+  %result = call i64 @llvm.amdgcn.wave.reduce.and.i64(i64 poison, i32 1)
+  store i64 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
+; GFX8DAGISEL-LABEL: divergent_value_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    s_mov_b32 s4, -1
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT:    s_mov_b32 s5, s4
+; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
+; GFX8DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX8DAGISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX8DAGISEL-NEXT:    s_bitset0_b64 s[6:7], s10
+; GFX8DAGISEL-NEXT:    s_and_b64 s[4:5], s[4:5], s[8:9]
+; GFX8DAGISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX8DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX8DAGISEL-NEXT:  ; %bb.2:
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX8DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8GISEL-LABEL: divergent_value_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_mov_b32 s4, -1
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT:    s_mov_b32 s5, s4
+; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8GISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
+; GFX8GISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX8GISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX8GISEL-NEXT:    s_bitset0_b64 s[6:7], s10
+; GFX8GISEL-NEXT:    s_and_b64 s[4:5], s[4:5], s[8:9]
+; GFX8GISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX8GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX8GISEL-NEXT:  ; %bb.2:
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX8GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9DAGISEL-LABEL: divergent_value_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    s_mov_b32 s4, -1
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT:    s_mov_b32 s5, s4
+; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
+; GFX9DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX9DAGISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX9DAGISEL-NEXT:    s_bitset0_b64 s[6:7], s10
+; GFX9DAGISEL-NEXT:    s_and_b64 s[4:5], s[4:5], s[8:9]
+; GFX9DAGISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX9DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX9DAGISEL-NEXT:  ; %bb.2:
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX9DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9GISEL-LABEL: divergent_value_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_mov_b32 s4, -1
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT:    s_mov_b32 s5, s4
+; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9GISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
+; GFX9GISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX9GISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX9GISEL-NEXT:    s_bitset0_b64 s[6:7], s10
+; GFX9GISEL-NEXT:    s_and_b64 s[4:5], s[4:5], s[8:9]
+; GFX9GISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX9GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX9GISEL-NEXT:  ; %bb.2:
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX9GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1064DAGISEL-LABEL: divergent_value_i64:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, -1
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s5, s4
+; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
+; GFX1064DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX1064DAGISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX1064DAGISEL-NEXT:    s_bitset0_b64 s[6:7], s10
+; GFX1064DAGISEL-NEXT:    s_and_b64 s[4:5], s[4:5], s[8:9]
+; GFX1064DAGISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX1064DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1064DAGISEL-NEXT:  ; %bb.2:
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1064GISEL-LABEL: divergent_value_i64:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_mov_b32 s4, -1
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT:    s_mov_b32 s5, s4
+; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
+; GFX1064GISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX1064GISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX1064GISEL-NEXT:    s_bitset0_b64 s[6:7], s10
+; GFX1064GISEL-NEXT:    s_and_b64 s[4:5], s[4:5], s[8:9]
+; GFX1064GISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX1064GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1064GISEL-NEXT:  ; %bb.2:
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1032DAGISEL-LABEL: divergent_value_i64:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, -1
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s5, s4
+; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
+; GFX1032DAGISEL-NEXT:    v_readlane_b32 s8, v2, s7
+; GFX1032DAGISEL-NEXT:    v_readlane_b32 s9, v3, s7
+; GFX1032DAGISEL-NEXT:    s_bitset0_b32 s6, s7
+; GFX1032DAGISEL-NEXT:    s_and_b64 s[4:5], s[4:5], s[8:9]
+; GFX1032DAGISEL-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1032DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1032DAGISEL-NEXT:  ; %bb.2:
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1032GISEL-LABEL: divergent_value_i64:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_mov_b32 s4, -1
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT:    s_mov_b32 s5, s4
+; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
+; GFX1032GISEL-NEXT:    v_readlane_b32 s8, v2, s7
+; GFX1032GISEL-NEXT:    v_readlane_b32 s9, v3, s7
+; GFX1032GISEL-NEXT:    s_bitset0_b32 s6, s7
+; GFX1032GISEL-NEXT:    s_and_b64 s[4:5], s[4:5], s[8:9]
+; GFX1032GISEL-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1032GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1032GISEL-NEXT:  ; %bb.2:
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1164DAGISEL-LABEL: divergent_value_i64:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s6, s[2:3]
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s6
+; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s6
+; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s6
+; GFX1164DAGISEL-NEXT:    s_and_b64 s[0:1], s[0:1], s[4:5]
+; GFX1164DAGISEL-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; GFX1164DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1164DAGISEL-NEXT:  ; %bb.2:
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX1164DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1164DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1164GISEL-LABEL: divergent_value_i64:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s6, s[2:3]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s6
+; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s6
+; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s6
+; GFX1164GISEL-NEXT:    s_and_b64 s[0:1], s[0:1], s[4:5]
+; GFX1164GISEL-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; GFX1164GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1164GISEL-NEXT:  ; %bb.2:
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX1164GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1164GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1132DAGISEL-LABEL: divergent_value_i64:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
+; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
+; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
+; GFX1132DAGISEL-NEXT:    s_and_b64 s[0:1], s[0:1], s[4:5]
+; GFX1132DAGISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX1132DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1132DAGISEL-NEXT:  ; %bb.2:
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1132DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1132GISEL-LABEL: divergent_value_i64:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
+; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
+; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
+; GFX1132GISEL-NEXT:    s_and_b64 s[0:1], s[0:1], s[4:5]
+; GFX1132GISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX1132GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1132GISEL-NEXT:  ; %bb.2:
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1132GISEL-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %result = call i64 @llvm.amdgcn.wave.reduce.and.i64(i64 %id.x, i32 1)
+  store i64 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64 %in2) {
+; GFX8DAGISEL-LABEL: divergent_cfg_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX8DAGISEL-NEXT:    s_and_saveexec_b64 s[6:7], vcc
+; GFX8DAGISEL-NEXT:    s_xor_b64 s[6:7], exec, s[6:7]
+; GFX8DAGISEL-NEXT:    s_or_saveexec_b64 s[6:7], s[6:7]
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX8DAGISEL-NEXT:    s_xor_b64 exec, exec, s[6:7]
+; GFX8DAGISEL-NEXT:  ; %bb.1: ; %if
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX8DAGISEL-NEXT:  ; %bb.2: ; %endif
+; GFX8DAGISEL-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: divergent_cfg_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT:    v_cmp_le_u32_e32 vcc, 16, v0
+; GFX8GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX8GISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX8GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8GISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX8GISEL-NEXT:  ; %bb.1: ; %else
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; GFX8GISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX8GISEL-NEXT:    s_cbranch_execz .LBB9_4
+; GFX8GISEL-NEXT:  ; %bb.3: ; %if
+; GFX8GISEL-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], s[4:5]
+; GFX8GISEL-NEXT:  .LBB9_4: ; %endif
+; GFX8GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: divergent_cfg_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX9DAGISEL-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX9DAGISEL-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
+; GFX9DAGISEL-NEXT:    s_or_saveexec_b64 s[4:5], s[4:5]
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT:    s_xor_b64 exec, exec, s[4:5]
+; GFX9DAGISEL-NEXT:  ; %bb.1: ; %if
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9DAGISEL-NEXT:  ; %bb.2: ; %endif
+; GFX9DAGISEL-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: divergent_cfg_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT:    v_cmp_le_u32_e32 vcc, 16, v0
+; GFX9GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX9GISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX9GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9GISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX9GISEL-NEXT:  ; %bb.1: ; %else
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; GFX9GISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX9GISEL-NEXT:    s_cbranch_execz .LBB9_4
+; GFX9GISEL-NEXT:  ; %bb.3: ; %if
+; GFX9GISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], s[6:7]
+; GFX9GISEL-NEXT:  .LBB9_4: ; %endif
+; GFX9GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX1064DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_clause 0x1
+; GFX1064DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX1064DAGISEL-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX1064DAGISEL-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
+; GFX1064DAGISEL-NEXT:    s_or_saveexec_b64 s[4:5], s[4:5]
+; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT:    s_xor_b64 exec, exec, s[4:5]
+; GFX1064DAGISEL-NEXT:  ; %bb.1: ; %if
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1064DAGISEL-NEXT:  ; %bb.2: ; %endif
+; GFX1064DAGISEL-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT:    s_endpgm
+;
+; GFX1064GISEL-LABEL: divergent_cfg_i64:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT:    v_cmp_le_u32_e32 vcc, 16, v0
+; GFX1064GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1064GISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX1064GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1064GISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX1064GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; GFX1064GISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX1064GISEL-NEXT:    s_cbranch_execz .LBB9_4
+; GFX1064GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1064GISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], s[6:7]
+; GFX1064GISEL-NEXT:  .LBB9_4: ; %endif
+; GFX1064GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT:    s_endpgm
+;
+; GFX1032DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_clause 0x1
+; GFX1032DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc_lo, 15, v0
+; GFX1032DAGISEL-NEXT:    s_and_saveexec_b32 s4, vcc_lo
+; GFX1032DAGISEL-NEXT:    s_xor_b32 s4, exec_lo, s4
+; GFX1032DAGISEL-NEXT:    s_or_saveexec_b32 s4, s4
+; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT:    s_xor_b32 exec_lo, exec_lo, s4
+; GFX1032DAGISEL-NEXT:  ; %bb.1: ; %if
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1032DAGISEL-NEXT:  ; %bb.2: ; %endif
+; GFX1032DAGISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s4
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT:    s_endpgm
+;
+; GFX1032GISEL-LABEL: divergent_cfg_i64:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT:    v_cmp_le_u32_e32 vcc_lo, 16, v0
+; GFX1032GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1032GISEL-NEXT:    s_and_saveexec_b32 s8, vcc_lo
+; GFX1032GISEL-NEXT:    s_xor_b32 s8, exec_lo, s8
+; GFX1032GISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX1032GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; GFX1032GISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_andn2_saveexec_b32 s2, s8
+; GFX1032GISEL-NEXT:    s_cbranch_execz .LBB9_4
+; GFX1032GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1032GISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_mov_b64 s[6:7], s[6:7]
+; GFX1032GISEL-NEXT:  .LBB9_4: ; %endif
+; GFX1032GISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT:    s_endpgm
+;
+; GFX1164DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_clause 0x1
+; GFX1164DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164DAGISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    v_cmpx_lt_u32_e32 15, v0
+; GFX1164DAGISEL-NEXT:    s_xor_b64 s[6:7], exec, s[6:7]
+; GFX1164DAGISEL-NEXT:    s_or_saveexec_b64 s[6:7], s[6:7]
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT:    s_xor_b64 exec, exec, s[6:7]
+; GFX1164DAGISEL-NEXT:  ; %bb.1: ; %if
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1164DAGISEL-NEXT:  ; %bb.2: ; %endif
+; GFX1164DAGISEL-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT:    s_endpgm
+;
+; GFX1164GISEL-LABEL: divergent_cfg_i64:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164GISEL-NEXT:    s_mov_b64 s[8:9], exec
+; GFX1164GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT:    v_cmpx_le_u32_e32 16, v0
+; GFX1164GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1164GISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX1164GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; GFX1164GISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_and_not1_saveexec_b64 s[2:3], s[8:9]
+; GFX1164GISEL-NEXT:    s_cbranch_execz .LBB9_4
+; GFX1164GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1164GISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_mov_b64 s[6:7], s[4:5]
+; GFX1164GISEL-NEXT:  .LBB9_4: ; %endif
+; GFX1164GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT:    s_endpgm
+;
+; GFX1132DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_clause 0x1
+; GFX1132DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132DAGISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    v_cmpx_lt_u32_e32 15, v0
+; GFX1132DAGISEL-NEXT:    s_xor_b32 s6, exec_lo, s6
+; GFX1132DAGISEL-NEXT:    s_or_saveexec_b32 s6, s6
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT:    s_xor_b32 exec_lo, exec_lo, s6
+; GFX1132DAGISEL-NEXT:  ; %bb.1: ; %if
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX1132DAGISEL-NEXT:  ; %bb.2: ; %endif
+; GFX1132DAGISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s6
+; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT:    s_endpgm
+;
+; GFX1132GISEL-LABEL: divergent_cfg_i64:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132GISEL-NEXT:    s_mov_b32 s8, exec_lo
+; GFX1132GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT:    v_cmpx_le_u32_e32 16, v0
+; GFX1132GISEL-NEXT:    s_xor_b32 s8, exec_lo, s8
+; GFX1132GISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX1132GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; GFX1132GISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_and_not1_saveexec_b32 s2, s8
+; GFX1132GISEL-NEXT:    s_cbranch_execz .LBB9_4
+; GFX1132GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1132GISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_mov_b64 s[6:7], s[4:5]
+; GFX1132GISEL-NEXT:  .LBB9_4: ; %endif
+; GFX1132GISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT:    s_endpgm
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %d_cmp = icmp ult i32 %tid, 16
+  br i1 %d_cmp, label %if, label %else
+
+if:
+  %reducedValTid = call i64 @llvm.amdgcn.wave.reduce.and.i64(i64 %in2, i32 1)
+  br label %endif
+
+else:
+  %reducedValIn = call i64 @llvm.amdgcn.wave.reduce.and.i64(i64 %in, i32 1)
+  br label %endif
+
+endif:
+  %combine = phi i64 [%reducedValTid, %if], [%reducedValIn, %else]
+  store i64 %combine, ptr addrspace(1) %out
+  ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.or.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.or.ll
index e08787e6ba70a..1849eaecbe143 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.or.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.or.ll
@@ -980,3 +980,858 @@ endif:
   store i32 %combine, ptr addrspace(1) %out
   ret void
 }
+
+define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: uniform_value_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: uniform_value_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: uniform_value_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: uniform_value_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX10DAGISEL-LABEL: uniform_value_i64:
+; GFX10DAGISEL:       ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX10DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX10DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT:    s_endpgm
+;
+; GFX10GISEL-LABEL: uniform_value_i64:
+; GFX10GISEL:       ; %bb.0: ; %entry
+; GFX10GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX10GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX10GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT:    s_endpgm
+;
+; GFX1164DAGISEL-LABEL: uniform_value_i64:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT:    s_endpgm
+;
+; GFX1164GISEL-LABEL: uniform_value_i64:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT:    s_endpgm
+;
+; GFX1132DAGISEL-LABEL: uniform_value_i64:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT:    s_endpgm
+;
+; GFX1132GISEL-LABEL: uniform_value_i64:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT:    s_endpgm
+entry:
+  %result = call i64 @llvm.amdgcn.wave.reduce.or.i64(i64 %in, i32 1)
+  store i64 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
+; GFX8DAGISEL-LABEL: const_value_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: const_value_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: const_value_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: const_value_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX10DAGISEL-LABEL: const_value_i64:
+; GFX10DAGISEL:       ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX10DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT:    s_endpgm
+;
+; GFX10GISEL-LABEL: const_value_i64:
+; GFX10GISEL:       ; %bb.0: ; %entry
+; GFX10GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10GISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX10GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX10GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT:    s_endpgm
+;
+; GFX1164DAGISEL-LABEL: const_value_i64:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT:    s_endpgm
+;
+; GFX1164GISEL-LABEL: const_value_i64:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT:    s_endpgm
+;
+; GFX1132DAGISEL-LABEL: const_value_i64:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT:    s_endpgm
+;
+; GFX1132GISEL-LABEL: const_value_i64:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132GISEL-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT:    s_endpgm
+entry:
+  %result = call i64 @llvm.amdgcn.wave.reduce.or.i64(i64 123, i32 1)
+  store i64 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: poison_value_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[0:1]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: poison_value_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[0:1], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: poison_value_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: poison_value_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX10DAGISEL-LABEL: poison_value_i64:
+; GFX10DAGISEL:       ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10DAGISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX10DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT:    global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT:    s_endpgm
+;
+; GFX10GISEL-LABEL: poison_value_i64:
+; GFX10GISEL:       ; %bb.0: ; %entry
+; GFX10GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10GISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX10GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT:    global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX10GISEL-NEXT:    s_endpgm
+;
+; GFX11DAGISEL-LABEL: poison_value_i64:
+; GFX11DAGISEL:       ; %bb.0: ; %entry
+; GFX11DAGISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX11DAGISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX11DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11DAGISEL-NEXT:    global_store_b64 v0, v[0:1], s[0:1]
+; GFX11DAGISEL-NEXT:    s_endpgm
+;
+; GFX11GISEL-LABEL: poison_value_i64:
+; GFX11GISEL:       ; %bb.0: ; %entry
+; GFX11GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX11GISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX11GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11GISEL-NEXT:    global_store_b64 v0, v[0:1], s[0:1]
+; GFX11GISEL-NEXT:    s_endpgm
+entry:
+  %result = call i64 @llvm.amdgcn.wave.reduce.or.i64(i64 poison, i32 1)
+  store i64 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
+; GFX8DAGISEL-LABEL: divergent_value_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT:    s_mov_b32 s5, s4
+; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
+; GFX8DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX8DAGISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX8DAGISEL-NEXT:    s_bitset0_b64 s[6:7], s10
+; GFX8DAGISEL-NEXT:    s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX8DAGISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX8DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX8DAGISEL-NEXT:  ; %bb.2:
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX8DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8GISEL-LABEL: divergent_value_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT:    s_mov_b32 s5, s4
+; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8GISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
+; GFX8GISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX8GISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX8GISEL-NEXT:    s_bitset0_b64 s[6:7], s10
+; GFX8GISEL-NEXT:    s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX8GISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX8GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX8GISEL-NEXT:  ; %bb.2:
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX8GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9DAGISEL-LABEL: divergent_value_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT:    s_mov_b32 s5, s4
+; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
+; GFX9DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX9DAGISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX9DAGISEL-NEXT:    s_bitset0_b64 s[6:7], s10
+; GFX9DAGISEL-NEXT:    s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX9DAGISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX9DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX9DAGISEL-NEXT:  ; %bb.2:
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX9DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9GISEL-LABEL: divergent_value_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT:    s_mov_b32 s5, s4
+; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9GISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
+; GFX9GISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX9GISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX9GISEL-NEXT:    s_bitset0_b64 s[6:7], s10
+; GFX9GISEL-NEXT:    s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX9GISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX9GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX9GISEL-NEXT:  ; %bb.2:
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX9GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1064DAGISEL-LABEL: divergent_value_i64:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s5, s4
+; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
+; GFX1064DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX1064DAGISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX1064DAGISEL-NEXT:    s_bitset0_b64 s[6:7], s10
+; GFX1064DAGISEL-NEXT:    s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX1064DAGISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX1064DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1064DAGISEL-NEXT:  ; %bb.2:
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1064GISEL-LABEL: divergent_value_i64:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT:    s_mov_b32 s5, s4
+; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
+; GFX1064GISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX1064GISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX1064GISEL-NEXT:    s_bitset0_b64 s[6:7], s10
+; GFX1064GISEL-NEXT:    s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX1064GISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX1064GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1064GISEL-NEXT:  ; %bb.2:
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1032DAGISEL-LABEL: divergent_value_i64:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s5, s4
+; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
+; GFX1032DAGISEL-NEXT:    v_readlane_b32 s8, v2, s7
+; GFX1032DAGISEL-NEXT:    v_readlane_b32 s9, v3, s7
+; GFX1032DAGISEL-NEXT:    s_bitset0_b32 s6, s7
+; GFX1032DAGISEL-NEXT:    s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX1032DAGISEL-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1032DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1032DAGISEL-NEXT:  ; %bb.2:
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1032GISEL-LABEL: divergent_value_i64:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT:    s_mov_b32 s5, s4
+; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
+; GFX1032GISEL-NEXT:    v_readlane_b32 s8, v2, s7
+; GFX1032GISEL-NEXT:    v_readlane_b32 s9, v3, s7
+; GFX1032GISEL-NEXT:    s_bitset0_b32 s6, s7
+; GFX1032GISEL-NEXT:    s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX1032GISEL-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1032GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1032GISEL-NEXT:  ; %bb.2:
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1164DAGISEL-LABEL: divergent_value_i64:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s6, s[2:3]
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s6
+; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s6
+; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s6
+; GFX1164DAGISEL-NEXT:    s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX1164DAGISEL-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; GFX1164DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1164DAGISEL-NEXT:  ; %bb.2:
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX1164DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1164DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1164GISEL-LABEL: divergent_value_i64:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s6, s[2:3]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s6
+; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s6
+; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s6
+; GFX1164GISEL-NEXT:    s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX1164GISEL-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; GFX1164GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1164GISEL-NEXT:  ; %bb.2:
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX1164GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1164GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1132DAGISEL-LABEL: divergent_value_i64:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
+; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
+; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
+; GFX1132DAGISEL-NEXT:    s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX1132DAGISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX1132DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1132DAGISEL-NEXT:  ; %bb.2:
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1132DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1132GISEL-LABEL: divergent_value_i64:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
+; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
+; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
+; GFX1132GISEL-NEXT:    s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX1132GISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX1132GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1132GISEL-NEXT:  ; %bb.2:
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1132GISEL-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %result = call i64 @llvm.amdgcn.wave.reduce.or.i64(i64 %id.x, i32 1)
+  store i64 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64 %in2) {
+; GFX8DAGISEL-LABEL: divergent_cfg_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX8DAGISEL-NEXT:    s_and_saveexec_b64 s[6:7], vcc
+; GFX8DAGISEL-NEXT:    s_xor_b64 s[6:7], exec, s[6:7]
+; GFX8DAGISEL-NEXT:    s_or_saveexec_b64 s[6:7], s[6:7]
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX8DAGISEL-NEXT:    s_xor_b64 exec, exec, s[6:7]
+; GFX8DAGISEL-NEXT:  ; %bb.1: ; %if
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX8DAGISEL-NEXT:  ; %bb.2: ; %endif
+; GFX8DAGISEL-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: divergent_cfg_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT:    v_cmp_le_u32_e32 vcc, 16, v0
+; GFX8GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX8GISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX8GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8GISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX8GISEL-NEXT:  ; %bb.1: ; %else
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; GFX8GISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX8GISEL-NEXT:    s_cbranch_execz .LBB9_4
+; GFX8GISEL-NEXT:  ; %bb.3: ; %if
+; GFX8GISEL-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], s[4:5]
+; GFX8GISEL-NEXT:  .LBB9_4: ; %endif
+; GFX8GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: divergent_cfg_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX9DAGISEL-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX9DAGISEL-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
+; GFX9DAGISEL-NEXT:    s_or_saveexec_b64 s[4:5], s[4:5]
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT:    s_xor_b64 exec, exec, s[4:5]
+; GFX9DAGISEL-NEXT:  ; %bb.1: ; %if
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9DAGISEL-NEXT:  ; %bb.2: ; %endif
+; GFX9DAGISEL-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: divergent_cfg_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT:    v_cmp_le_u32_e32 vcc, 16, v0
+; GFX9GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX9GISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX9GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9GISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX9GISEL-NEXT:  ; %bb.1: ; %else
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; GFX9GISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX9GISEL-NEXT:    s_cbranch_execz .LBB9_4
+; GFX9GISEL-NEXT:  ; %bb.3: ; %if
+; GFX9GISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], s[6:7]
+; GFX9GISEL-NEXT:  .LBB9_4: ; %endif
+; GFX9GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX1064DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_clause 0x1
+; GFX1064DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX1064DAGISEL-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX1064DAGISEL-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
+; GFX1064DAGISEL-NEXT:    s_or_saveexec_b64 s[4:5], s[4:5]
+; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT:    s_xor_b64 exec, exec, s[4:5]
+; GFX1064DAGISEL-NEXT:  ; %bb.1: ; %if
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1064DAGISEL-NEXT:  ; %bb.2: ; %endif
+; GFX1064DAGISEL-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT:    s_endpgm
+;
+; GFX1064GISEL-LABEL: divergent_cfg_i64:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT:    v_cmp_le_u32_e32 vcc, 16, v0
+; GFX1064GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1064GISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX1064GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1064GISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX1064GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; GFX1064GISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX1064GISEL-NEXT:    s_cbranch_execz .LBB9_4
+; GFX1064GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1064GISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], s[6:7]
+; GFX1064GISEL-NEXT:  .LBB9_4: ; %endif
+; GFX1064GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT:    s_endpgm
+;
+; GFX1032DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_clause 0x1
+; GFX1032DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc_lo, 15, v0
+; GFX1032DAGISEL-NEXT:    s_and_saveexec_b32 s4, vcc_lo
+; GFX1032DAGISEL-NEXT:    s_xor_b32 s4, exec_lo, s4
+; GFX1032DAGISEL-NEXT:    s_or_saveexec_b32 s4, s4
+; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT:    s_xor_b32 exec_lo, exec_lo, s4
+; GFX1032DAGISEL-NEXT:  ; %bb.1: ; %if
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1032DAGISEL-NEXT:  ; %bb.2: ; %endif
+; GFX1032DAGISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s4
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT:    s_endpgm
+;
+; GFX1032GISEL-LABEL: divergent_cfg_i64:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT:    v_cmp_le_u32_e32 vcc_lo, 16, v0
+; GFX1032GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1032GISEL-NEXT:    s_and_saveexec_b32 s8, vcc_lo
+; GFX1032GISEL-NEXT:    s_xor_b32 s8, exec_lo, s8
+; GFX1032GISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX1032GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; GFX1032GISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_andn2_saveexec_b32 s2, s8
+; GFX1032GISEL-NEXT:    s_cbranch_execz .LBB9_4
+; GFX1032GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1032GISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_mov_b64 s[6:7], s[6:7]
+; GFX1032GISEL-NEXT:  .LBB9_4: ; %endif
+; GFX1032GISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT:    s_endpgm
+;
+; GFX1164DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_clause 0x1
+; GFX1164DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164DAGISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    v_cmpx_lt_u32_e32 15, v0
+; GFX1164DAGISEL-NEXT:    s_xor_b64 s[6:7], exec, s[6:7]
+; GFX1164DAGISEL-NEXT:    s_or_saveexec_b64 s[6:7], s[6:7]
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT:    s_xor_b64 exec, exec, s[6:7]
+; GFX1164DAGISEL-NEXT:  ; %bb.1: ; %if
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1164DAGISEL-NEXT:  ; %bb.2: ; %endif
+; GFX1164DAGISEL-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT:    s_endpgm
+;
+; GFX1164GISEL-LABEL: divergent_cfg_i64:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164GISEL-NEXT:    s_mov_b64 s[8:9], exec
+; GFX1164GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT:    v_cmpx_le_u32_e32 16, v0
+; GFX1164GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1164GISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX1164GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; GFX1164GISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_and_not1_saveexec_b64 s[2:3], s[8:9]
+; GFX1164GISEL-NEXT:    s_cbranch_execz .LBB9_4
+; GFX1164GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1164GISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_mov_b64 s[6:7], s[4:5]
+; GFX1164GISEL-NEXT:  .LBB9_4: ; %endif
+; GFX1164GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT:    s_endpgm
+;
+; GFX1132DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_clause 0x1
+; GFX1132DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132DAGISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    v_cmpx_lt_u32_e32 15, v0
+; GFX1132DAGISEL-NEXT:    s_xor_b32 s6, exec_lo, s6
+; GFX1132DAGISEL-NEXT:    s_or_saveexec_b32 s6, s6
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT:    s_xor_b32 exec_lo, exec_lo, s6
+; GFX1132DAGISEL-NEXT:  ; %bb.1: ; %if
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX1132DAGISEL-NEXT:  ; %bb.2: ; %endif
+; GFX1132DAGISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s6
+; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT:    s_endpgm
+;
+; GFX1132GISEL-LABEL: divergent_cfg_i64:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132GISEL-NEXT:    s_mov_b32 s8, exec_lo
+; GFX1132GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT:    v_cmpx_le_u32_e32 16, v0
+; GFX1132GISEL-NEXT:    s_xor_b32 s8, exec_lo, s8
+; GFX1132GISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX1132GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; GFX1132GISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_and_not1_saveexec_b32 s2, s8
+; GFX1132GISEL-NEXT:    s_cbranch_execz .LBB9_4
+; GFX1132GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1132GISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_mov_b64 s[6:7], s[4:5]
+; GFX1132GISEL-NEXT:  .LBB9_4: ; %endif
+; GFX1132GISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT:    s_endpgm
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %d_cmp = icmp ult i32 %tid, 16
+  br i1 %d_cmp, label %if, label %else
+
+if:
+  %reducedValTid = call i64 @llvm.amdgcn.wave.reduce.or.i64(i64 %in2, i32 1)
+  br label %endif
+
+else:
+  %reducedValIn = call i64 @llvm.amdgcn.wave.reduce.or.i64(i64 %in, i32 1)
+  br label %endif
+
+endif:
+  %combine = phi i64 [%reducedValTid, %if], [%reducedValIn, %else]
+  store i64 %combine, ptr addrspace(1) %out
+  ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll
index 5b21d5c3aaeb6..b96954d030fef 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll
@@ -1279,6 +1279,1419 @@ endif:
   store i32 %combine, ptr addrspace(1) %out
   ret void
 }
+
+define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: uniform_value_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX8DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX8DAGISEL-NEXT:    s_and_b32 s4, s4, 1
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT:    s_mul_i32 s0, s2, s4
+; GFX8DAGISEL-NEXT:    s_mul_i32 s1, s3, s4
+; GFX8DAGISEL-NEXT:    s_mul_hi_u32 s2, s2, s4
+; GFX8DAGISEL-NEXT:    s_add_u32 s1, s2, s1
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: uniform_value_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX8GISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX8GISEL-NEXT:    s_and_b32 s5, s4, 1
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_mul_i32 s4, s2, s5
+; GFX8GISEL-NEXT:    s_mul_i32 s3, s3, s5
+; GFX8GISEL-NEXT:    s_mul_hi_u32 s2, s2, s5
+; GFX8GISEL-NEXT:    s_add_u32 s5, s2, s3
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: uniform_value_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9DAGISEL-NEXT:    s_and_b32 s5, s4, 1
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    s_mul_i32 s4, s2, s5
+; GFX9DAGISEL-NEXT:    s_mul_i32 s3, s3, s5
+; GFX9DAGISEL-NEXT:    s_mul_hi_u32 s2, s2, s5
+; GFX9DAGISEL-NEXT:    s_add_u32 s5, s2, s3
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: uniform_value_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9GISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9GISEL-NEXT:    s_and_b32 s5, s4, 1
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_mul_i32 s4, s2, s5
+; GFX9GISEL-NEXT:    s_mul_i32 s3, s3, s5
+; GFX9GISEL-NEXT:    s_mul_hi_u32 s2, s2, s5
+; GFX9GISEL-NEXT:    s_add_u32 s5, s2, s3
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX1064DAGISEL-LABEL: uniform_value_i64:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064DAGISEL-NEXT:    s_and_b32 s4, s4, 1
+; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s3, s3, s4
+; GFX1064DAGISEL-NEXT:    s_mul_hi_u32 s5, s2, s4
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s2, s2, s4
+; GFX1064DAGISEL-NEXT:    s_add_u32 s3, s5, s3
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT:    s_endpgm
+;
+; GFX1064GISEL-LABEL: uniform_value_i64:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064GISEL-NEXT:    s_and_b32 s4, s4, 1
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_mul_i32 s3, s3, s4
+; GFX1064GISEL-NEXT:    s_mul_hi_u32 s5, s2, s4
+; GFX1064GISEL-NEXT:    s_mul_i32 s2, s2, s4
+; GFX1064GISEL-NEXT:    s_add_u32 s3, s5, s3
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT:    s_endpgm
+;
+; GFX1032DAGISEL-LABEL: uniform_value_i64:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, exec_lo
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT:    s_bcnt1_i32_b32 s4, s4
+; GFX1032DAGISEL-NEXT:    s_and_b32 s4, s4, 1
+; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s3, s3, s4
+; GFX1032DAGISEL-NEXT:    s_mul_hi_u32 s5, s2, s4
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s2, s2, s4
+; GFX1032DAGISEL-NEXT:    s_add_u32 s3, s5, s3
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT:    s_endpgm
+;
+; GFX1032GISEL-LABEL: uniform_value_i64:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT:    s_mov_b32 s4, exec_lo
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT:    s_bcnt1_i32_b32 s4, s4
+; GFX1032GISEL-NEXT:    s_and_b32 s4, s4, 1
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_mul_i32 s3, s3, s4
+; GFX1032GISEL-NEXT:    s_mul_hi_u32 s5, s2, s4
+; GFX1032GISEL-NEXT:    s_mul_i32 s2, s2, s4
+; GFX1032GISEL-NEXT:    s_add_u32 s3, s5, s3
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT:    s_endpgm
+;
+; GFX1164DAGISEL-LABEL: uniform_value_i64:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    s_and_b32 s4, s4, 1
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s3, s3, s4
+; GFX1164DAGISEL-NEXT:    s_mul_hi_u32 s5, s2, s4
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s2, s2, s4
+; GFX1164DAGISEL-NEXT:    s_add_u32 s3, s5, s3
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT:    s_endpgm
+;
+; GFX1164GISEL-LABEL: uniform_value_i64:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT:    s_and_b32 s4, s4, 1
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_mul_i32 s3, s3, s4
+; GFX1164GISEL-NEXT:    s_mul_hi_u32 s5, s2, s4
+; GFX1164GISEL-NEXT:    s_mul_i32 s2, s2, s4
+; GFX1164GISEL-NEXT:    s_add_u32 s3, s5, s3
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT:    s_endpgm
+;
+; GFX1132DAGISEL-LABEL: uniform_value_i64:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s4, exec_lo
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    s_bcnt1_i32_b32 s4, s4
+; GFX1132DAGISEL-NEXT:    s_and_b32 s4, s4, 1
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s3, s3, s4
+; GFX1132DAGISEL-NEXT:    s_mul_hi_u32 s5, s2, s4
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s2, s2, s4
+; GFX1132DAGISEL-NEXT:    s_add_u32 s3, s5, s3
+; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT:    s_endpgm
+;
+; GFX1132GISEL-LABEL: uniform_value_i64:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT:    s_mov_b32 s4, exec_lo
+; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT:    s_bcnt1_i32_b32 s4, s4
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    s_and_b32 s4, s4, 1
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_mul_i32 s3, s3, s4
+; GFX1132GISEL-NEXT:    s_mul_hi_u32 s5, s2, s4
+; GFX1132GISEL-NEXT:    s_mul_i32 s2, s2, s4
+; GFX1132GISEL-NEXT:    s_add_u32 s3, s5, s3
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT:    s_endpgm
+entry:
+  %result = call i64 @llvm.amdgcn.wave.reduce.xor.i64(i64 %in, i32 1)
+  store i64 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
+; GFX8DAGISEL-LABEL: const_value_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX8DAGISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX8DAGISEL-NEXT:    s_and_b32 s3, s2, 1
+; GFX8DAGISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
+; GFX8DAGISEL-NEXT:    s_mul_i32 s4, s3, 0
+; GFX8DAGISEL-NEXT:    s_mul_hi_u32 s3, 0x7b, s3
+; GFX8DAGISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: const_value_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX8GISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX8GISEL-NEXT:    s_and_b32 s3, s2, 1
+; GFX8GISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
+; GFX8GISEL-NEXT:    s_mul_i32 s4, s3, 0
+; GFX8GISEL-NEXT:    s_mul_hi_u32 s3, 0x7b, s3
+; GFX8GISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: const_value_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX9DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9DAGISEL-NEXT:    s_and_b32 s3, s2, 1
+; GFX9DAGISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
+; GFX9DAGISEL-NEXT:    s_mul_i32 s4, s3, 0
+; GFX9DAGISEL-NEXT:    s_mul_hi_u32 s3, 0x7b, s3
+; GFX9DAGISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: const_value_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX9GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9GISEL-NEXT:    s_and_b32 s3, s2, 1
+; GFX9GISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
+; GFX9GISEL-NEXT:    s_mul_i32 s4, s3, 0
+; GFX9GISEL-NEXT:    s_mul_hi_u32 s3, 0x7b, s3
+; GFX9GISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX1064DAGISEL-LABEL: const_value_i64:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064DAGISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s3, s2, 0
+; GFX1064DAGISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s2
+; GFX1064DAGISEL-NEXT:    s_mulk_i32 s2, 0x7b
+; GFX1064DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT:    s_endpgm
+;
+; GFX1064GISEL-LABEL: const_value_i64:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1064GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1064GISEL-NEXT:    s_mul_i32 s3, s2, 0
+; GFX1064GISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s2
+; GFX1064GISEL-NEXT:    s_mulk_i32 s2, 0x7b
+; GFX1064GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT:    s_endpgm
+;
+; GFX1032DAGISEL-LABEL: const_value_i64:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX1032DAGISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s3, s2, 0
+; GFX1032DAGISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s2
+; GFX1032DAGISEL-NEXT:    s_mulk_i32 s2, 0x7b
+; GFX1032DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT:    s_endpgm
+;
+; GFX1032GISEL-LABEL: const_value_i64:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1032GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX1032GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1032GISEL-NEXT:    s_mul_i32 s3, s2, 0
+; GFX1032GISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s2
+; GFX1032GISEL-NEXT:    s_mulk_i32 s2, 0x7b
+; GFX1032GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT:    s_endpgm
+;
+; GFX1164DAGISEL-LABEL: const_value_i64:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s3, s2, 0
+; GFX1164DAGISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s2
+; GFX1164DAGISEL-NEXT:    s_mulk_i32 s2, 0x7b
+; GFX1164DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT:    s_endpgm
+;
+; GFX1164GISEL-LABEL: const_value_i64:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1164GISEL-NEXT:    s_mul_i32 s3, s2, 0
+; GFX1164GISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s2
+; GFX1164GISEL-NEXT:    s_mulk_i32 s2, 0x7b
+; GFX1164GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT:    s_endpgm
+;
+; GFX1132DAGISEL-LABEL: const_value_i64:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX1132DAGISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s3, s2, 0
+; GFX1132DAGISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s2
+; GFX1132DAGISEL-NEXT:    s_mulk_i32 s2, 0x7b
+; GFX1132DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT:    s_endpgm
+;
+; GFX1132GISEL-LABEL: const_value_i64:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1132GISEL-NEXT:    s_mul_i32 s3, s2, 0
+; GFX1132GISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s2
+; GFX1132GISEL-NEXT:    s_mulk_i32 s2, 0x7b
+; GFX1132GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT:    s_endpgm
+entry:
+  %result = call i64 @llvm.amdgcn.wave.reduce.xor.i64(i64 123, i32 1)
+  store i64 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: poison_value_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX8DAGISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX8DAGISEL-NEXT:    s_and_b32 s3, s2, 1
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    s_mul_i32 s2, s0, s3
+; GFX8DAGISEL-NEXT:    s_mul_i32 s4, s1, s3
+; GFX8DAGISEL-NEXT:    s_mul_hi_u32 s3, s0, s3
+; GFX8DAGISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: poison_value_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX8GISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX8GISEL-NEXT:    s_and_b32 s3, s2, 1
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_mul_i32 s2, s0, s3
+; GFX8GISEL-NEXT:    s_mul_i32 s4, s1, s3
+; GFX8GISEL-NEXT:    s_mul_hi_u32 s3, s0, s3
+; GFX8GISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: poison_value_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX9DAGISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9DAGISEL-NEXT:    s_and_b32 s3, s2, 1
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    s_mul_i32 s2, s0, s3
+; GFX9DAGISEL-NEXT:    s_mul_i32 s4, s1, s3
+; GFX9DAGISEL-NEXT:    s_mul_hi_u32 s3, s0, s3
+; GFX9DAGISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: poison_value_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX9GISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9GISEL-NEXT:    s_and_b32 s3, s2, 1
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_mul_i32 s2, s0, s3
+; GFX9GISEL-NEXT:    s_mul_i32 s4, s1, s3
+; GFX9GISEL-NEXT:    s_mul_hi_u32 s3, s0, s3
+; GFX9GISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX1064DAGISEL-LABEL: poison_value_i64:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064DAGISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s3, s1, s2
+; GFX1064DAGISEL-NEXT:    s_mul_hi_u32 s4, s0, s2
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s2, s0, s2
+; GFX1064DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT:    s_endpgm
+;
+; GFX1064GISEL-LABEL: poison_value_i64:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1064GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_mul_i32 s3, s1, s2
+; GFX1064GISEL-NEXT:    s_mul_hi_u32 s4, s0, s2
+; GFX1064GISEL-NEXT:    s_mul_i32 s2, s0, s2
+; GFX1064GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT:    s_endpgm
+;
+; GFX1032DAGISEL-LABEL: poison_value_i64:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX1032DAGISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s3, s1, s2
+; GFX1032DAGISEL-NEXT:    s_mul_hi_u32 s4, s0, s2
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s2, s0, s2
+; GFX1032DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT:    s_endpgm
+;
+; GFX1032GISEL-LABEL: poison_value_i64:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1032GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX1032GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_mul_i32 s3, s1, s2
+; GFX1032GISEL-NEXT:    s_mul_hi_u32 s4, s0, s2
+; GFX1032GISEL-NEXT:    s_mul_i32 s2, s0, s2
+; GFX1032GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT:    s_endpgm
+;
+; GFX1164DAGISEL-LABEL: poison_value_i64:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s3, s1, s2
+; GFX1164DAGISEL-NEXT:    s_mul_hi_u32 s4, s0, s2
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s2, s0, s2
+; GFX1164DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT:    s_endpgm
+;
+; GFX1164GISEL-LABEL: poison_value_i64:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_mul_i32 s3, s1, s2
+; GFX1164GISEL-NEXT:    s_mul_hi_u32 s4, s0, s2
+; GFX1164GISEL-NEXT:    s_mul_i32 s2, s0, s2
+; GFX1164GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT:    s_endpgm
+;
+; GFX1132DAGISEL-LABEL: poison_value_i64:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX1132DAGISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s3, s1, s2
+; GFX1132DAGISEL-NEXT:    s_mul_hi_u32 s4, s0, s2
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s2, s0, s2
+; GFX1132DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT:    s_endpgm
+;
+; GFX1132GISEL-LABEL: poison_value_i64:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_mul_i32 s3, s1, s2
+; GFX1132GISEL-NEXT:    s_mul_hi_u32 s4, s0, s2
+; GFX1132GISEL-NEXT:    s_mul_i32 s2, s0, s2
+; GFX1132GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT:    s_endpgm
+entry:
+  %result = call i64 @llvm.amdgcn.wave.reduce.xor.i64(i64 poison, i32 1)
+  store i64 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
+; GFX8DAGISEL-LABEL: divergent_value_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT:    s_mov_b32 s5, s4
+; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
+; GFX8DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX8DAGISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX8DAGISEL-NEXT:    s_bitset0_b64 s[6:7], s10
+; GFX8DAGISEL-NEXT:    s_xor_b64 s[4:5], s[4:5], s[8:9]
+; GFX8DAGISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX8DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX8DAGISEL-NEXT:  ; %bb.2:
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX8DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8GISEL-LABEL: divergent_value_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT:    s_mov_b32 s5, s4
+; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8GISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
+; GFX8GISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX8GISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX8GISEL-NEXT:    s_bitset0_b64 s[6:7], s10
+; GFX8GISEL-NEXT:    s_xor_b64 s[4:5], s[4:5], s[8:9]
+; GFX8GISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX8GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX8GISEL-NEXT:  ; %bb.2:
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX8GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9DAGISEL-LABEL: divergent_value_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT:    s_mov_b32 s5, s4
+; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
+; GFX9DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX9DAGISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX9DAGISEL-NEXT:    s_bitset0_b64 s[6:7], s10
+; GFX9DAGISEL-NEXT:    s_xor_b64 s[4:5], s[4:5], s[8:9]
+; GFX9DAGISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX9DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX9DAGISEL-NEXT:  ; %bb.2:
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX9DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9GISEL-LABEL: divergent_value_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT:    s_mov_b32 s5, s4
+; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9GISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
+; GFX9GISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX9GISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX9GISEL-NEXT:    s_bitset0_b64 s[6:7], s10
+; GFX9GISEL-NEXT:    s_xor_b64 s[4:5], s[4:5], s[8:9]
+; GFX9GISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX9GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX9GISEL-NEXT:  ; %bb.2:
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX9GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1064DAGISEL-LABEL: divergent_value_i64:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s5, s4
+; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
+; GFX1064DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX1064DAGISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX1064DAGISEL-NEXT:    s_bitset0_b64 s[6:7], s10
+; GFX1064DAGISEL-NEXT:    s_xor_b64 s[4:5], s[4:5], s[8:9]
+; GFX1064DAGISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX1064DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1064DAGISEL-NEXT:  ; %bb.2:
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1064GISEL-LABEL: divergent_value_i64:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT:    s_mov_b32 s5, s4
+; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
+; GFX1064GISEL-NEXT:    v_readlane_b32 s8, v2, s10
+; GFX1064GISEL-NEXT:    v_readlane_b32 s9, v3, s10
+; GFX1064GISEL-NEXT:    s_bitset0_b64 s[6:7], s10
+; GFX1064GISEL-NEXT:    s_xor_b64 s[4:5], s[4:5], s[8:9]
+; GFX1064GISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX1064GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1064GISEL-NEXT:  ; %bb.2:
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1032DAGISEL-LABEL: divergent_value_i64:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s5, s4
+; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
+; GFX1032DAGISEL-NEXT:    v_readlane_b32 s8, v2, s7
+; GFX1032DAGISEL-NEXT:    v_readlane_b32 s9, v3, s7
+; GFX1032DAGISEL-NEXT:    s_bitset0_b32 s6, s7
+; GFX1032DAGISEL-NEXT:    s_xor_b64 s[4:5], s[4:5], s[8:9]
+; GFX1032DAGISEL-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1032DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1032DAGISEL-NEXT:  ; %bb.2:
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1032GISEL-LABEL: divergent_value_i64:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT:    s_mov_b32 s5, s4
+; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
+; GFX1032GISEL-NEXT:    v_readlane_b32 s8, v2, s7
+; GFX1032GISEL-NEXT:    v_readlane_b32 s9, v3, s7
+; GFX1032GISEL-NEXT:    s_bitset0_b32 s6, s7
+; GFX1032GISEL-NEXT:    s_xor_b64 s[4:5], s[4:5], s[8:9]
+; GFX1032GISEL-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1032GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1032GISEL-NEXT:  ; %bb.2:
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1164DAGISEL-LABEL: divergent_value_i64:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s6, s[2:3]
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s6
+; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s6
+; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s6
+; GFX1164DAGISEL-NEXT:    s_xor_b64 s[0:1], s[0:1], s[4:5]
+; GFX1164DAGISEL-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; GFX1164DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1164DAGISEL-NEXT:  ; %bb.2:
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX1164DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1164DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1164GISEL-LABEL: divergent_value_i64:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s6, s[2:3]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s6
+; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s6
+; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s6
+; GFX1164GISEL-NEXT:    s_xor_b64 s[0:1], s[0:1], s[4:5]
+; GFX1164GISEL-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; GFX1164GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1164GISEL-NEXT:  ; %bb.2:
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX1164GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1164GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1132DAGISEL-LABEL: divergent_value_i64:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
+; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
+; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
+; GFX1132DAGISEL-NEXT:    s_xor_b64 s[0:1], s[0:1], s[4:5]
+; GFX1132DAGISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX1132DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1132DAGISEL-NEXT:  ; %bb.2:
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1132DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1132GISEL-LABEL: divergent_value_i64:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
+; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
+; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
+; GFX1132GISEL-NEXT:    s_xor_b64 s[0:1], s[0:1], s[4:5]
+; GFX1132GISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX1132GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1132GISEL-NEXT:  ; %bb.2:
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1132GISEL-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %result = call i64 @llvm.amdgcn.wave.reduce.xor.i64(i64 %id.x, i32 1)
+  store i64 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64 %in2) {
+; GFX8DAGISEL-LABEL: divergent_cfg_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX8DAGISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX8DAGISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX8DAGISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8DAGISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX8DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8DAGISEL-NEXT:    s_and_b32 s7, s6, 1
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    s_mul_i32 s6, s2, s7
+; GFX8DAGISEL-NEXT:    s_mul_i32 s3, s3, s7
+; GFX8DAGISEL-NEXT:    s_mul_hi_u32 s2, s2, s7
+; GFX8DAGISEL-NEXT:    s_add_u32 s7, s2, s3
+; GFX8DAGISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    s_or_saveexec_b64 s[2:3], s[8:9]
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX8DAGISEL-NEXT:    s_xor_b64 exec, exec, s[2:3]
+; GFX8DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8DAGISEL-NEXT:    s_and_b32 s7, s6, 1
+; GFX8DAGISEL-NEXT:    s_mul_i32 s6, s4, s7
+; GFX8DAGISEL-NEXT:    s_mul_i32 s5, s5, s7
+; GFX8DAGISEL-NEXT:    s_mul_hi_u32 s4, s4, s7
+; GFX8DAGISEL-NEXT:    s_add_u32 s7, s4, s5
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX8DAGISEL-NEXT:  ; %bb.4: ; %endif
+; GFX8DAGISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: divergent_cfg_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT:    v_cmp_le_u32_e32 vcc, 16, v0
+; GFX8GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX8GISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX8GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8GISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX8GISEL-NEXT:  ; %bb.1: ; %else
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8GISEL-NEXT:    s_and_b32 s7, s6, 1
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_mul_i32 s6, s2, s7
+; GFX8GISEL-NEXT:    s_mul_i32 s3, s3, s7
+; GFX8GISEL-NEXT:    s_mul_hi_u32 s2, s2, s7
+; GFX8GISEL-NEXT:    s_add_u32 s7, s2, s3
+; GFX8GISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX8GISEL-NEXT:    s_cbranch_execz .LBB9_4
+; GFX8GISEL-NEXT:  ; %bb.3: ; %if
+; GFX8GISEL-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8GISEL-NEXT:    s_and_b32 s7, s6, 1
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_mul_i32 s6, s4, s7
+; GFX8GISEL-NEXT:    s_mul_i32 s5, s5, s7
+; GFX8GISEL-NEXT:    s_mul_hi_u32 s4, s4, s7
+; GFX8GISEL-NEXT:    s_add_u32 s7, s4, s5
+; GFX8GISEL-NEXT:  .LBB9_4: ; %endif
+; GFX8GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: divergent_cfg_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX9DAGISEL-NEXT:    ; implicit-def: $sgpr4_sgpr5
+; GFX9DAGISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX9DAGISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9DAGISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX9DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9DAGISEL-NEXT:    s_and_b32 s5, s4, 1
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    s_mul_i32 s4, s2, s5
+; GFX9DAGISEL-NEXT:    s_mul_i32 s3, s3, s5
+; GFX9DAGISEL-NEXT:    s_mul_hi_u32 s2, s2, s5
+; GFX9DAGISEL-NEXT:    s_add_u32 s5, s2, s3
+; GFX9DAGISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    s_or_saveexec_b64 s[2:3], s[8:9]
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9DAGISEL-NEXT:    s_xor_b64 exec, exec, s[2:3]
+; GFX9DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9DAGISEL-NEXT:    s_and_b32 s5, s4, 1
+; GFX9DAGISEL-NEXT:    s_mul_i32 s4, s6, s5
+; GFX9DAGISEL-NEXT:    s_mul_i32 s7, s7, s5
+; GFX9DAGISEL-NEXT:    s_mul_hi_u32 s5, s6, s5
+; GFX9DAGISEL-NEXT:    s_add_u32 s5, s5, s7
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9DAGISEL-NEXT:  ; %bb.4: ; %endif
+; GFX9DAGISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: divergent_cfg_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT:    v_cmp_le_u32_e32 vcc, 16, v0
+; GFX9GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX9GISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX9GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9GISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX9GISEL-NEXT:  ; %bb.1: ; %else
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX9GISEL-NEXT:    s_and_b32 s7, s6, 1
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_mul_i32 s6, s2, s7
+; GFX9GISEL-NEXT:    s_mul_i32 s3, s3, s7
+; GFX9GISEL-NEXT:    s_mul_hi_u32 s2, s2, s7
+; GFX9GISEL-NEXT:    s_add_u32 s7, s2, s3
+; GFX9GISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX9GISEL-NEXT:    s_cbranch_execz .LBB9_4
+; GFX9GISEL-NEXT:  ; %bb.3: ; %if
+; GFX9GISEL-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x34
+; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9GISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9GISEL-NEXT:    s_and_b32 s4, s4, 1
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_mul_i32 s6, s8, s4
+; GFX9GISEL-NEXT:    s_mul_i32 s5, s9, s4
+; GFX9GISEL-NEXT:    s_mul_hi_u32 s4, s8, s4
+; GFX9GISEL-NEXT:    s_add_u32 s7, s4, s5
+; GFX9GISEL-NEXT:  .LBB9_4: ; %endif
+; GFX9GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX1064DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_clause 0x1
+; GFX1064DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX1064DAGISEL-NEXT:    ; implicit-def: $sgpr8_sgpr9
+; GFX1064DAGISEL-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX1064DAGISEL-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
+; GFX1064DAGISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX1064DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[8:9], exec
+; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s8, s[8:9]
+; GFX1064DAGISEL-NEXT:    s_and_b32 s8, s8, 1
+; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s3, s3, s8
+; GFX1064DAGISEL-NEXT:    s_mul_hi_u32 s9, s2, s8
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s8, s2, s8
+; GFX1064DAGISEL-NEXT:    s_add_u32 s9, s9, s3
+; GFX1064DAGISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    s_or_saveexec_b64 s[2:3], s[4:5]
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s8
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s9
+; GFX1064DAGISEL-NEXT:    s_xor_b64 exec, exec, s[2:3]
+; GFX1064DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064DAGISEL-NEXT:    s_and_b32 s4, s4, 1
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s5, s7, s4
+; GFX1064DAGISEL-NEXT:    s_mul_hi_u32 s7, s6, s4
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s4, s6, s4
+; GFX1064DAGISEL-NEXT:    s_add_u32 s5, s7, s5
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1064DAGISEL-NEXT:  ; %bb.4: ; %endif
+; GFX1064DAGISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT:    s_endpgm
+;
+; GFX1064GISEL-LABEL: divergent_cfg_i64:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT:    v_cmp_le_u32_e32 vcc, 16, v0
+; GFX1064GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1064GISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX1064GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1064GISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX1064GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1064GISEL-NEXT:    s_and_b32 s6, s6, 1
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_mul_i32 s3, s3, s6
+; GFX1064GISEL-NEXT:    s_mul_hi_u32 s7, s2, s6
+; GFX1064GISEL-NEXT:    s_mul_i32 s6, s2, s6
+; GFX1064GISEL-NEXT:    s_add_u32 s7, s7, s3
+; GFX1064GISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX1064GISEL-NEXT:    s_cbranch_execz .LBB9_4
+; GFX1064GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1064GISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX1064GISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064GISEL-NEXT:    s_and_b32 s4, s4, 1
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_mul_i32 s5, s7, s4
+; GFX1064GISEL-NEXT:    s_mul_hi_u32 s7, s6, s4
+; GFX1064GISEL-NEXT:    s_mul_i32 s6, s6, s4
+; GFX1064GISEL-NEXT:    s_add_u32 s7, s7, s5
+; GFX1064GISEL-NEXT:  .LBB9_4: ; %endif
+; GFX1064GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT:    s_endpgm
+;
+; GFX1032DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_clause 0x1
+; GFX1032DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc_lo, 15, v0
+; GFX1032DAGISEL-NEXT:    ; implicit-def: $sgpr4_sgpr5
+; GFX1032DAGISEL-NEXT:    s_and_saveexec_b32 s8, vcc_lo
+; GFX1032DAGISEL-NEXT:    s_xor_b32 s8, exec_lo, s8
+; GFX1032DAGISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX1032DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, exec_lo
+; GFX1032DAGISEL-NEXT:    s_bcnt1_i32_b32 s4, s4
+; GFX1032DAGISEL-NEXT:    s_and_b32 s4, s4, 1
+; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s3, s3, s4
+; GFX1032DAGISEL-NEXT:    s_mul_hi_u32 s5, s2, s4
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s4, s2, s4
+; GFX1032DAGISEL-NEXT:    s_add_u32 s5, s5, s3
+; GFX1032DAGISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    s_or_saveexec_b32 s2, s8
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1032DAGISEL-NEXT:    s_xor_b32 exec_lo, exec_lo, s2
+; GFX1032DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s3, exec_lo
+; GFX1032DAGISEL-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX1032DAGISEL-NEXT:    s_and_b32 s3, s3, 1
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s5, s7, s3
+; GFX1032DAGISEL-NEXT:    s_mul_hi_u32 s7, s6, s3
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s4, s6, s3
+; GFX1032DAGISEL-NEXT:    s_add_u32 s5, s7, s5
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1032DAGISEL-NEXT:  ; %bb.4: ; %endif
+; GFX1032DAGISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT:    s_endpgm
+;
+; GFX1032GISEL-LABEL: divergent_cfg_i64:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT:    v_cmp_le_u32_e32 vcc_lo, 16, v0
+; GFX1032GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1032GISEL-NEXT:    s_and_saveexec_b32 s8, vcc_lo
+; GFX1032GISEL-NEXT:    s_xor_b32 s8, exec_lo, s8
+; GFX1032GISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX1032GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT:    s_bcnt1_i32_b32 s6, s6
+; GFX1032GISEL-NEXT:    s_and_b32 s6, s6, 1
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_mul_i32 s3, s3, s6
+; GFX1032GISEL-NEXT:    s_mul_hi_u32 s7, s2, s6
+; GFX1032GISEL-NEXT:    s_mul_i32 s6, s2, s6
+; GFX1032GISEL-NEXT:    s_add_u32 s7, s7, s3
+; GFX1032GISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_andn2_saveexec_b32 s2, s8
+; GFX1032GISEL-NEXT:    s_cbranch_execz .LBB9_4
+; GFX1032GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1032GISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032GISEL-NEXT:    s_mov_b32 s3, exec_lo
+; GFX1032GISEL-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX1032GISEL-NEXT:    s_and_b32 s3, s3, 1
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_mul_i32 s4, s7, s3
+; GFX1032GISEL-NEXT:    s_mul_hi_u32 s5, s6, s3
+; GFX1032GISEL-NEXT:    s_mul_i32 s6, s6, s3
+; GFX1032GISEL-NEXT:    s_add_u32 s7, s5, s4
+; GFX1032GISEL-NEXT:  .LBB9_4: ; %endif
+; GFX1032GISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT:    s_endpgm
+;
+; GFX1164DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_clause 0x1
+; GFX1164DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164DAGISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT:    ; implicit-def: $sgpr8_sgpr9
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT:    v_cmpx_lt_u32_e32 15, v0
+; GFX1164DAGISEL-NEXT:    s_xor_b64 s[6:7], exec, s[6:7]
+; GFX1164DAGISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX1164DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[8:9], exec
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    s_bcnt1_i32_b64 s8, s[8:9]
+; GFX1164DAGISEL-NEXT:    s_and_b32 s8, s8, 1
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s3, s3, s8
+; GFX1164DAGISEL-NEXT:    s_mul_hi_u32 s9, s2, s8
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s8, s2, s8
+; GFX1164DAGISEL-NEXT:    s_add_u32 s9, s9, s3
+; GFX1164DAGISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    s_or_saveexec_b64 s[2:3], s[6:7]
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s8
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s9
+; GFX1164DAGISEL-NEXT:    s_xor_b64 exec, exec, s[2:3]
+; GFX1164DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    s_and_b32 s6, s6, 1
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s5, s5, s6
+; GFX1164DAGISEL-NEXT:    s_mul_hi_u32 s7, s4, s6
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s4, s4, s6
+; GFX1164DAGISEL-NEXT:    s_add_u32 s5, s7, s5
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1164DAGISEL-NEXT:  ; %bb.4: ; %endif
+; GFX1164DAGISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT:    s_endpgm
+;
+; GFX1164GISEL-LABEL: divergent_cfg_i64:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164GISEL-NEXT:    s_mov_b64 s[8:9], exec
+; GFX1164GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT:    v_cmpx_le_u32_e32 16, v0
+; GFX1164GISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1164GISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX1164GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1164GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164GISEL-NEXT:    s_and_b32 s6, s6, 1
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_mul_i32 s3, s3, s6
+; GFX1164GISEL-NEXT:    s_mul_hi_u32 s7, s2, s6
+; GFX1164GISEL-NEXT:    s_mul_i32 s6, s2, s6
+; GFX1164GISEL-NEXT:    s_add_u32 s7, s7, s3
+; GFX1164GISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_and_not1_saveexec_b64 s[2:3], s[8:9]
+; GFX1164GISEL-NEXT:    s_cbranch_execz .LBB9_4
+; GFX1164GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1164GISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164GISEL-NEXT:    s_and_b32 s6, s6, 1
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_mul_i32 s5, s5, s6
+; GFX1164GISEL-NEXT:    s_mul_hi_u32 s7, s4, s6
+; GFX1164GISEL-NEXT:    s_mul_i32 s6, s4, s6
+; GFX1164GISEL-NEXT:    s_add_u32 s7, s7, s5
+; GFX1164GISEL-NEXT:  .LBB9_4: ; %endif
+; GFX1164GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT:    s_endpgm
+;
+; GFX1132DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_clause 0x1
+; GFX1132DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132DAGISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s8, exec_lo
+; GFX1132DAGISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT:    v_cmpx_lt_u32_e32 15, v0
+; GFX1132DAGISEL-NEXT:    s_xor_b32 s8, exec_lo, s8
+; GFX1132DAGISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX1132DAGISEL-NEXT:  ; %bb.1: ; %else
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    s_bcnt1_i32_b32 s6, s6
+; GFX1132DAGISEL-NEXT:    s_and_b32 s6, s6, 1
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s3, s3, s6
+; GFX1132DAGISEL-NEXT:    s_mul_hi_u32 s7, s2, s6
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s6, s2, s6
+; GFX1132DAGISEL-NEXT:    s_add_u32 s7, s7, s3
+; GFX1132DAGISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    s_or_saveexec_b32 s2, s8
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132DAGISEL-NEXT:    s_xor_b32 exec_lo, exec_lo, s2
+; GFX1132DAGISEL-NEXT:  ; %bb.3: ; %if
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s3, exec_lo
+; GFX1132DAGISEL-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    s_and_b32 s3, s3, 1
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s5, s5, s3
+; GFX1132DAGISEL-NEXT:    s_mul_hi_u32 s6, s4, s3
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s4, s4, s3
+; GFX1132DAGISEL-NEXT:    s_add_u32 s5, s6, s5
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX1132DAGISEL-NEXT:  ; %bb.4: ; %endif
+; GFX1132DAGISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT:    s_endpgm
+;
+; GFX1132GISEL-LABEL: divergent_cfg_i64:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132GISEL-NEXT:    s_mov_b32 s8, exec_lo
+; GFX1132GISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT:    v_cmpx_le_u32_e32 16, v0
+; GFX1132GISEL-NEXT:    s_xor_b32 s8, exec_lo, s8
+; GFX1132GISEL-NEXT:    s_cbranch_execz .LBB9_2
+; GFX1132GISEL-NEXT:  ; %bb.1: ; %else
+; GFX1132GISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    s_bcnt1_i32_b32 s6, s6
+; GFX1132GISEL-NEXT:    s_and_b32 s6, s6, 1
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_mul_i32 s3, s3, s6
+; GFX1132GISEL-NEXT:    s_mul_hi_u32 s7, s2, s6
+; GFX1132GISEL-NEXT:    s_mul_i32 s6, s2, s6
+; GFX1132GISEL-NEXT:    s_add_u32 s7, s7, s3
+; GFX1132GISEL-NEXT:  .LBB9_2: ; %Flow
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_and_not1_saveexec_b32 s2, s8
+; GFX1132GISEL-NEXT:    s_cbranch_execz .LBB9_4
+; GFX1132GISEL-NEXT:  ; %bb.3: ; %if
+; GFX1132GISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132GISEL-NEXT:    s_mov_b32 s3, exec_lo
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX1132GISEL-NEXT:    s_and_b32 s3, s3, 1
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_mul_i32 s5, s5, s3
+; GFX1132GISEL-NEXT:    s_mul_hi_u32 s7, s4, s3
+; GFX1132GISEL-NEXT:    s_mul_i32 s6, s4, s3
+; GFX1132GISEL-NEXT:    s_add_u32 s7, s7, s5
+; GFX1132GISEL-NEXT:  .LBB9_4: ; %endif
+; GFX1132GISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT:    s_endpgm
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %d_cmp = icmp ult i32 %tid, 16
+  br i1 %d_cmp, label %if, label %else
+
+if:
+  %reducedValTid = call i64 @llvm.amdgcn.wave.reduce.xor.i64(i64 %in2, i32 1)
+  br label %endif
+
+else:
+  %reducedValIn = call i64 @llvm.amdgcn.wave.reduce.xor.i64(i64 %in, i32 1)
+  br label %endif
+
+endif:
+  %combine = phi i64 [%reducedValTid, %if], [%reducedValIn, %else]
+  store i64 %combine, ptr addrspace(1) %out
+  ret void
+}
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; GFX10DAGISEL: {{.*}}
 ; GFX10GISEL: {{.*}}

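For context on the checks above: every i64 xor-reduction variant in this file follows the same lowering strategy. A uniform, constant, or poison input xor-ed across N active lanes collapses to val * (N & 1), which is exactly what the s_bcnt1 / s_and_b32 ..., 1 / s_mul_* sequences compute, while a divergent input falls back to a readlane loop that xors one lane at a time. A minimal standalone sketch of the uniform-input identity, with illustrative names and the 64-bit multiply split into 32-bit halves the way the emitted SALU code does it (not LLVM code):

  #include <cassert>
  #include <cstdint>

  // XOR of Val repeated NumActiveLanes times is Val when the count is odd
  // and 0 when it is even, i.e. Val * (NumActiveLanes & 1).
  uint64_t waveXorReduceUniform(uint64_t Val, unsigned NumActiveLanes) {
    uint64_t Parity = NumActiveLanes & 1;                      // s_bcnt1 + s_and_b32 ..., 1
    uint32_t Lo = uint32_t(Val), Hi = uint32_t(Val >> 32);
    uint32_t DstLo = Lo * uint32_t(Parity);                    // s_mul_i32 (low dword)
    uint32_t DstHi = Hi * uint32_t(Parity) +                   // s_mul_i32 (high dword)
                     uint32_t((uint64_t(Lo) * Parity) >> 32);  // s_mul_hi_u32 + s_add_u32
    return (uint64_t(DstHi) << 32) | DstLo;
  }

  int main() {
    assert(waveXorReduceUniform(123, 5) == 123); // odd lane count -> value survives
    assert(waveXorReduceUniform(123, 64) == 0);  // even lane count -> xors cancel
    assert(waveXorReduceUniform(0xffffffffffffffffULL, 3) == 0xffffffffffffffffULL);
  }
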
>From 67819a140924caf7364f99cc87956b6f3fbc208c Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Thu, 31 Jul 2025 14:18:31 +0530
Subject: [PATCH 08/12] Removing redundant S_MUL_HI_U32/S_ADD_U32 instructions
 from the i64 wave reduction lowering

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  15 +-
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll  | 420 +++++++-----------
 2 files changed, 150 insertions(+), 285 deletions(-)

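The change below relies on ParityRegister holding only 0 or 1 (it is the result of the s_and_b32 ..., 1 seen in the generated checks): the unsigned high half of Op1L * parity is then always zero, the carry add contributes nothing, and the upper dword of the product is simply Op1H * parity, which is what the shortened sequence emits. A small standalone C++ check of that invariant, reusing the operand names from the diff purely for illustration:

  #include <cassert>
  #include <cstdint>

  int main() {
    // A 0/1 multiplier can never push the 32x32 product into the high
    // 32 bits, so there is no carry to fold into the upper dword.
    const uint32_t Samples[] = {0u, 1u, 0x7bu, 0xffffffffu};
    for (uint32_t Op1L : Samples)
      for (uint32_t Parity = 0; Parity <= 1; ++Parity)
        assert(uint32_t((uint64_t(Op1L) * Parity) >> 32) == 0);
  }
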
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index d506b42a6e90a..4500c05fd2b73 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5469,10 +5469,6 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
                   MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
               Register DestSub1 =
                   MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
-              Register Op1H_Op0L_Reg =
-                  MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
-              Register CarryReg =
-                  MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
 
               const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
               const TargetRegisterClass *SrcSubRC =
@@ -5487,19 +5483,10 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
                   .add(Op1L)
                   .addReg(ParityRegister);
 
-              BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), Op1H_Op0L_Reg)
+              BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DestSub1)
                   .add(Op1H)
                   .addReg(ParityRegister);
 
-              BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_HI_U32), CarryReg)
-                  .add(Op1L)
-                  .addReg(ParityRegister);
-
-              BuildMI(BB, MI, DL, TII->get(AMDGPU::S_ADD_U32), DestSub1)
-                  .addReg(CarryReg)
-                  .addReg(Op1H_Op0L_Reg)
-                  .setOperandDead(3); // Dead scc
-
               BuildMI(BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), DstReg)
                   .addReg(DestSub0)
                   .addImm(AMDGPU::sub0)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll
index b96954d030fef..9c523b2404121 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll
@@ -1288,12 +1288,10 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX8DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
 ; GFX8DAGISEL-NEXT:    s_and_b32 s4, s4, 1
 ; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s1
-; GFX8DAGISEL-NEXT:    s_mul_i32 s0, s2, s4
 ; GFX8DAGISEL-NEXT:    s_mul_i32 s1, s3, s4
-; GFX8DAGISEL-NEXT:    s_mul_hi_u32 s2, s2, s4
-; GFX8DAGISEL-NEXT:    s_add_u32 s1, s2, s1
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT:    s_mul_i32 s0, s2, s4
 ; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s1
 ; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s0
 ; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
@@ -1304,15 +1302,13 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX8GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], exec
 ; GFX8GISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
-; GFX8GISEL-NEXT:    s_and_b32 s5, s4, 1
+; GFX8GISEL-NEXT:    s_and_b32 s4, s4, 1
 ; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8GISEL-NEXT:    s_mul_i32 s4, s2, s5
-; GFX8GISEL-NEXT:    s_mul_i32 s3, s3, s5
-; GFX8GISEL-NEXT:    s_mul_hi_u32 s2, s2, s5
-; GFX8GISEL-NEXT:    s_add_u32 s5, s2, s3
-; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX8GISEL-NEXT:    s_mul_i32 s2, s2, s4
+; GFX8GISEL-NEXT:    s_mul_i32 s3, s3, s4
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
-; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
 ; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; GFX8GISEL-NEXT:    s_endpgm
@@ -1322,15 +1318,13 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX9DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
 ; GFX9DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
-; GFX9DAGISEL-NEXT:    s_and_b32 s5, s4, 1
+; GFX9DAGISEL-NEXT:    s_and_b32 s4, s4, 1
 ; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9DAGISEL-NEXT:    s_mul_i32 s4, s2, s5
-; GFX9DAGISEL-NEXT:    s_mul_i32 s3, s3, s5
-; GFX9DAGISEL-NEXT:    s_mul_hi_u32 s2, s2, s5
-; GFX9DAGISEL-NEXT:    s_add_u32 s5, s2, s3
-; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
-; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9DAGISEL-NEXT:    s_mul_i32 s2, s2, s4
+; GFX9DAGISEL-NEXT:    s_mul_i32 s3, s3, s4
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX9DAGISEL-NEXT:    s_endpgm
 ;
@@ -1339,15 +1333,13 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX9GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], exec
 ; GFX9GISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
-; GFX9GISEL-NEXT:    s_and_b32 s5, s4, 1
+; GFX9GISEL-NEXT:    s_and_b32 s4, s4, 1
 ; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9GISEL-NEXT:    s_mul_i32 s4, s2, s5
-; GFX9GISEL-NEXT:    s_mul_i32 s3, s3, s5
-; GFX9GISEL-NEXT:    s_mul_hi_u32 s2, s2, s5
-; GFX9GISEL-NEXT:    s_add_u32 s5, s2, s3
-; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s4
-; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9GISEL-NEXT:    s_mul_i32 s2, s2, s4
+; GFX9GISEL-NEXT:    s_mul_i32 s3, s3, s4
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX9GISEL-NEXT:    s_endpgm
 ;
@@ -1359,10 +1351,8 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
 ; GFX1064DAGISEL-NEXT:    s_and_b32 s4, s4, 1
 ; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1064DAGISEL-NEXT:    s_mul_i32 s3, s3, s4
-; GFX1064DAGISEL-NEXT:    s_mul_hi_u32 s5, s2, s4
 ; GFX1064DAGISEL-NEXT:    s_mul_i32 s2, s2, s4
-; GFX1064DAGISEL-NEXT:    s_add_u32 s3, s5, s3
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s3, s3, s4
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1376,10 +1366,8 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1064GISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
 ; GFX1064GISEL-NEXT:    s_and_b32 s4, s4, 1
 ; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1064GISEL-NEXT:    s_mul_i32 s3, s3, s4
-; GFX1064GISEL-NEXT:    s_mul_hi_u32 s5, s2, s4
 ; GFX1064GISEL-NEXT:    s_mul_i32 s2, s2, s4
-; GFX1064GISEL-NEXT:    s_add_u32 s3, s5, s3
+; GFX1064GISEL-NEXT:    s_mul_i32 s3, s3, s4
 ; GFX1064GISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1064GISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX1064GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1393,10 +1381,8 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1032DAGISEL-NEXT:    s_bcnt1_i32_b32 s4, s4
 ; GFX1032DAGISEL-NEXT:    s_and_b32 s4, s4, 1
 ; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1032DAGISEL-NEXT:    s_mul_i32 s3, s3, s4
-; GFX1032DAGISEL-NEXT:    s_mul_hi_u32 s5, s2, s4
 ; GFX1032DAGISEL-NEXT:    s_mul_i32 s2, s2, s4
-; GFX1032DAGISEL-NEXT:    s_add_u32 s3, s5, s3
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s3, s3, s4
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1410,10 +1396,8 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1032GISEL-NEXT:    s_bcnt1_i32_b32 s4, s4
 ; GFX1032GISEL-NEXT:    s_and_b32 s4, s4, 1
 ; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1032GISEL-NEXT:    s_mul_i32 s3, s3, s4
-; GFX1032GISEL-NEXT:    s_mul_hi_u32 s5, s2, s4
 ; GFX1032GISEL-NEXT:    s_mul_i32 s2, s2, s4
-; GFX1032GISEL-NEXT:    s_add_u32 s3, s5, s3
+; GFX1032GISEL-NEXT:    s_mul_i32 s3, s3, s4
 ; GFX1032GISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1032GISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX1032GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1428,10 +1412,8 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX1164DAGISEL-NEXT:    s_and_b32 s4, s4, 1
 ; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1164DAGISEL-NEXT:    s_mul_i32 s3, s3, s4
-; GFX1164DAGISEL-NEXT:    s_mul_hi_u32 s5, s2, s4
 ; GFX1164DAGISEL-NEXT:    s_mul_i32 s2, s2, s4
-; GFX1164DAGISEL-NEXT:    s_add_u32 s3, s5, s3
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s3, s3, s4
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
@@ -1446,10 +1428,8 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX1164GISEL-NEXT:    s_and_b32 s4, s4, 1
 ; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1164GISEL-NEXT:    s_mul_i32 s3, s3, s4
-; GFX1164GISEL-NEXT:    s_mul_hi_u32 s5, s2, s4
 ; GFX1164GISEL-NEXT:    s_mul_i32 s2, s2, s4
-; GFX1164GISEL-NEXT:    s_add_u32 s3, s5, s3
+; GFX1164GISEL-NEXT:    s_mul_i32 s3, s3, s4
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
@@ -1463,10 +1443,8 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1132DAGISEL-NEXT:    s_bcnt1_i32_b32 s4, s4
 ; GFX1132DAGISEL-NEXT:    s_and_b32 s4, s4, 1
 ; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1132DAGISEL-NEXT:    s_mul_i32 s3, s3, s4
-; GFX1132DAGISEL-NEXT:    s_mul_hi_u32 s5, s2, s4
 ; GFX1132DAGISEL-NEXT:    s_mul_i32 s2, s2, s4
-; GFX1132DAGISEL-NEXT:    s_add_u32 s3, s5, s3
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s3, s3, s4
 ; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
 ; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
@@ -1478,14 +1456,11 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1132GISEL-NEXT:    s_mov_b32 s4, exec_lo
 ; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1132GISEL-NEXT:    s_bcnt1_i32_b32 s4, s4
-; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
 ; GFX1132GISEL-NEXT:    s_and_b32 s4, s4, 1
 ; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1132GISEL-NEXT:    s_mul_i32 s3, s3, s4
-; GFX1132GISEL-NEXT:    s_mul_hi_u32 s5, s2, s4
 ; GFX1132GISEL-NEXT:    s_mul_i32 s2, s2, s4
-; GFX1132GISEL-NEXT:    s_add_u32 s3, s5, s3
-; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    s_mul_i32 s3, s3, s4
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
@@ -1503,9 +1478,7 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX8DAGISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
 ; GFX8DAGISEL-NEXT:    s_and_b32 s3, s2, 1
 ; GFX8DAGISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
-; GFX8DAGISEL-NEXT:    s_mul_i32 s4, s3, 0
-; GFX8DAGISEL-NEXT:    s_mul_hi_u32 s3, 0x7b, s3
-; GFX8DAGISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX8DAGISEL-NEXT:    s_mul_i32 s3, s3, 0
 ; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s2
@@ -1521,9 +1494,7 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX8GISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
 ; GFX8GISEL-NEXT:    s_and_b32 s3, s2, 1
 ; GFX8GISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
-; GFX8GISEL-NEXT:    s_mul_i32 s4, s3, 0
-; GFX8GISEL-NEXT:    s_mul_hi_u32 s3, 0x7b, s3
-; GFX8GISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX8GISEL-NEXT:    s_mul_i32 s3, s3, 0
 ; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
@@ -1539,9 +1510,7 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX9DAGISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
 ; GFX9DAGISEL-NEXT:    s_and_b32 s3, s2, 1
 ; GFX9DAGISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
-; GFX9DAGISEL-NEXT:    s_mul_i32 s4, s3, 0
-; GFX9DAGISEL-NEXT:    s_mul_hi_u32 s3, 0x7b, s3
-; GFX9DAGISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX9DAGISEL-NEXT:    s_mul_i32 s3, s3, 0
 ; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
@@ -1556,9 +1525,7 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX9GISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
 ; GFX9GISEL-NEXT:    s_and_b32 s3, s2, 1
 ; GFX9GISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
-; GFX9GISEL-NEXT:    s_mul_i32 s4, s3, 0
-; GFX9GISEL-NEXT:    s_mul_hi_u32 s3, 0x7b, s3
-; GFX9GISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX9GISEL-NEXT:    s_mul_i32 s3, s3, 0
 ; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
@@ -1572,11 +1539,9 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
-; GFX1064DAGISEL-NEXT:    s_and_b32 s2, s2, 1
-; GFX1064DAGISEL-NEXT:    s_mul_i32 s3, s2, 0
-; GFX1064DAGISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s2
-; GFX1064DAGISEL-NEXT:    s_mulk_i32 s2, 0x7b
-; GFX1064DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1064DAGISEL-NEXT:    s_and_b32 s3, s2, 1
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s3, s3, 0
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
@@ -1589,11 +1554,9 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1064GISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
-; GFX1064GISEL-NEXT:    s_and_b32 s2, s2, 1
-; GFX1064GISEL-NEXT:    s_mul_i32 s3, s2, 0
-; GFX1064GISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s2
-; GFX1064GISEL-NEXT:    s_mulk_i32 s2, 0x7b
-; GFX1064GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1064GISEL-NEXT:    s_and_b32 s3, s2, 1
+; GFX1064GISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
+; GFX1064GISEL-NEXT:    s_mul_i32 s3, s3, 0
 ; GFX1064GISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1064GISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
@@ -1606,11 +1569,9 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1032DAGISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
-; GFX1032DAGISEL-NEXT:    s_and_b32 s2, s2, 1
-; GFX1032DAGISEL-NEXT:    s_mul_i32 s3, s2, 0
-; GFX1032DAGISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s2
-; GFX1032DAGISEL-NEXT:    s_mulk_i32 s2, 0x7b
-; GFX1032DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1032DAGISEL-NEXT:    s_and_b32 s3, s2, 1
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s3, s3, 0
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
@@ -1623,11 +1584,9 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX1032GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1032GISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
-; GFX1032GISEL-NEXT:    s_and_b32 s2, s2, 1
-; GFX1032GISEL-NEXT:    s_mul_i32 s3, s2, 0
-; GFX1032GISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s2
-; GFX1032GISEL-NEXT:    s_mulk_i32 s2, 0x7b
-; GFX1032GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1032GISEL-NEXT:    s_and_b32 s3, s2, 1
+; GFX1032GISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
+; GFX1032GISEL-NEXT:    s_mul_i32 s3, s3, 0
 ; GFX1032GISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1032GISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
@@ -1641,11 +1600,9 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1164DAGISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1164DAGISEL-NEXT:    s_and_b32 s2, s2, 1
-; GFX1164DAGISEL-NEXT:    s_mul_i32 s3, s2, 0
-; GFX1164DAGISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s2
-; GFX1164DAGISEL-NEXT:    s_mulk_i32 s2, 0x7b
-; GFX1164DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1164DAGISEL-NEXT:    s_and_b32 s3, s2, 1
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s3, s3, 0
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
@@ -1659,11 +1616,9 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1164GISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1164GISEL-NEXT:    s_and_b32 s2, s2, 1
-; GFX1164GISEL-NEXT:    s_mul_i32 s3, s2, 0
-; GFX1164GISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s2
-; GFX1164GISEL-NEXT:    s_mulk_i32 s2, 0x7b
-; GFX1164GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1164GISEL-NEXT:    s_and_b32 s3, s2, 1
+; GFX1164GISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
+; GFX1164GISEL-NEXT:    s_mul_i32 s3, s3, 0
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
@@ -1676,12 +1631,10 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132DAGISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
-; GFX1132DAGISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1132DAGISEL-NEXT:    s_and_b32 s3, s2, 1
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132DAGISEL-NEXT:    s_mul_i32 s3, s2, 0
-; GFX1132DAGISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s2
-; GFX1132DAGISEL-NEXT:    s_mulk_i32 s2, 0x7b
-; GFX1132DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s3, s3, 0
 ; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
 ; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
@@ -1695,11 +1648,9 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1132GISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1132GISEL-NEXT:    s_and_b32 s2, s2, 1
-; GFX1132GISEL-NEXT:    s_mul_i32 s3, s2, 0
-; GFX1132GISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s2
-; GFX1132GISEL-NEXT:    s_mulk_i32 s2, 0x7b
-; GFX1132GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1132GISEL-NEXT:    s_and_b32 s3, s2, 1
+; GFX1132GISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
+; GFX1132GISEL-NEXT:    s_mul_i32 s3, s3, 0
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
 ; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
@@ -1720,9 +1671,7 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX8DAGISEL-NEXT:    s_and_b32 s3, s2, 1
 ; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8DAGISEL-NEXT:    s_mul_i32 s2, s0, s3
-; GFX8DAGISEL-NEXT:    s_mul_i32 s4, s1, s3
-; GFX8DAGISEL-NEXT:    s_mul_hi_u32 s3, s0, s3
-; GFX8DAGISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX8DAGISEL-NEXT:    s_mul_i32 s3, s1, s3
 ; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s1
@@ -1738,9 +1687,7 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX8GISEL-NEXT:    s_and_b32 s3, s2, 1
 ; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8GISEL-NEXT:    s_mul_i32 s2, s0, s3
-; GFX8GISEL-NEXT:    s_mul_i32 s4, s1, s3
-; GFX8GISEL-NEXT:    s_mul_hi_u32 s3, s0, s3
-; GFX8GISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX8GISEL-NEXT:    s_mul_i32 s3, s1, s3
 ; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
 ; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s3
@@ -1757,9 +1704,7 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9DAGISEL-NEXT:    s_mul_i32 s2, s0, s3
-; GFX9DAGISEL-NEXT:    s_mul_i32 s4, s1, s3
-; GFX9DAGISEL-NEXT:    s_mul_hi_u32 s3, s0, s3
-; GFX9DAGISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX9DAGISEL-NEXT:    s_mul_i32 s3, s1, s3
 ; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1774,9 +1719,7 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9GISEL-NEXT:    s_mul_i32 s2, s0, s3
-; GFX9GISEL-NEXT:    s_mul_i32 s4, s1, s3
-; GFX9GISEL-NEXT:    s_mul_hi_u32 s3, s0, s3
-; GFX9GISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX9GISEL-NEXT:    s_mul_i32 s3, s1, s3
 ; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1788,12 +1731,10 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
-; GFX1064DAGISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1064DAGISEL-NEXT:    s_and_b32 s3, s2, 1
 ; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1064DAGISEL-NEXT:    s_mul_i32 s3, s1, s2
-; GFX1064DAGISEL-NEXT:    s_mul_hi_u32 s4, s0, s2
-; GFX1064DAGISEL-NEXT:    s_mul_i32 s2, s0, s2
-; GFX1064DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s2, s0, s3
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s3, s1, s3
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1805,12 +1746,10 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1064GISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
-; GFX1064GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1064GISEL-NEXT:    s_and_b32 s3, s2, 1
 ; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1064GISEL-NEXT:    s_mul_i32 s3, s1, s2
-; GFX1064GISEL-NEXT:    s_mul_hi_u32 s4, s0, s2
-; GFX1064GISEL-NEXT:    s_mul_i32 s2, s0, s2
-; GFX1064GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1064GISEL-NEXT:    s_mul_i32 s2, s0, s3
+; GFX1064GISEL-NEXT:    s_mul_i32 s3, s1, s3
 ; GFX1064GISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1064GISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX1064GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1822,12 +1761,10 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1032DAGISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
-; GFX1032DAGISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1032DAGISEL-NEXT:    s_and_b32 s3, s2, 1
 ; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1032DAGISEL-NEXT:    s_mul_i32 s3, s1, s2
-; GFX1032DAGISEL-NEXT:    s_mul_hi_u32 s4, s0, s2
-; GFX1032DAGISEL-NEXT:    s_mul_i32 s2, s0, s2
-; GFX1032DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s2, s0, s3
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s3, s1, s3
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1839,12 +1776,10 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1032GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1032GISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
-; GFX1032GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1032GISEL-NEXT:    s_and_b32 s3, s2, 1
 ; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1032GISEL-NEXT:    s_mul_i32 s3, s1, s2
-; GFX1032GISEL-NEXT:    s_mul_hi_u32 s4, s0, s2
-; GFX1032GISEL-NEXT:    s_mul_i32 s2, s0, s2
-; GFX1032GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1032GISEL-NEXT:    s_mul_i32 s2, s0, s3
+; GFX1032GISEL-NEXT:    s_mul_i32 s3, s1, s3
 ; GFX1032GISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1032GISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX1032GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1857,12 +1792,10 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1164DAGISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164DAGISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1164DAGISEL-NEXT:    s_and_b32 s3, s2, 1
 ; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1164DAGISEL-NEXT:    s_mul_i32 s3, s1, s2
-; GFX1164DAGISEL-NEXT:    s_mul_hi_u32 s4, s0, s2
-; GFX1164DAGISEL-NEXT:    s_mul_i32 s2, s0, s2
-; GFX1164DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s2, s0, s3
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s3, s1, s3
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
@@ -1875,12 +1808,10 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1164GISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1164GISEL-NEXT:    s_and_b32 s3, s2, 1
 ; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1164GISEL-NEXT:    s_mul_i32 s3, s1, s2
-; GFX1164GISEL-NEXT:    s_mul_hi_u32 s4, s0, s2
-; GFX1164GISEL-NEXT:    s_mul_i32 s2, s0, s2
-; GFX1164GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1164GISEL-NEXT:    s_mul_i32 s2, s0, s3
+; GFX1164GISEL-NEXT:    s_mul_i32 s3, s1, s3
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
@@ -1892,12 +1823,10 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132DAGISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
-; GFX1132DAGISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1132DAGISEL-NEXT:    s_and_b32 s3, s2, 1
 ; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1132DAGISEL-NEXT:    s_mul_i32 s3, s1, s2
-; GFX1132DAGISEL-NEXT:    s_mul_hi_u32 s4, s0, s2
-; GFX1132DAGISEL-NEXT:    s_mul_i32 s2, s0, s2
-; GFX1132DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s2, s0, s3
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s3, s1, s3
 ; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
 ; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
@@ -1909,14 +1838,11 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1132GISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
-; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    s_and_b32 s3, s2, 1
 ; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1132GISEL-NEXT:    s_mul_i32 s3, s1, s2
-; GFX1132GISEL-NEXT:    s_mul_hi_u32 s4, s0, s2
-; GFX1132GISEL-NEXT:    s_mul_i32 s2, s0, s2
-; GFX1132GISEL-NEXT:    s_add_u32 s3, s4, s3
-; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    s_mul_i32 s2, s0, s3
+; GFX1132GISEL-NEXT:    s_mul_i32 s3, s1, s3
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
@@ -2194,9 +2120,7 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX8DAGISEL-NEXT:    s_and_b32 s7, s6, 1
 ; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8DAGISEL-NEXT:    s_mul_i32 s6, s2, s7
-; GFX8DAGISEL-NEXT:    s_mul_i32 s3, s3, s7
-; GFX8DAGISEL-NEXT:    s_mul_hi_u32 s2, s2, s7
-; GFX8DAGISEL-NEXT:    s_add_u32 s7, s2, s3
+; GFX8DAGISEL-NEXT:    s_mul_i32 s7, s3, s7
 ; GFX8DAGISEL-NEXT:  .LBB9_2: ; %Flow
 ; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8DAGISEL-NEXT:    s_or_saveexec_b64 s[2:3], s[8:9]
@@ -2206,13 +2130,11 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX8DAGISEL-NEXT:  ; %bb.3: ; %if
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX8DAGISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
-; GFX8DAGISEL-NEXT:    s_and_b32 s7, s6, 1
-; GFX8DAGISEL-NEXT:    s_mul_i32 s6, s4, s7
-; GFX8DAGISEL-NEXT:    s_mul_i32 s5, s5, s7
-; GFX8DAGISEL-NEXT:    s_mul_hi_u32 s4, s4, s7
-; GFX8DAGISEL-NEXT:    s_add_u32 s7, s4, s5
-; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s6
-; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s7
+; GFX8DAGISEL-NEXT:    s_and_b32 s6, s6, 1
+; GFX8DAGISEL-NEXT:    s_mul_i32 s4, s4, s6
+; GFX8DAGISEL-NEXT:    s_mul_i32 s5, s5, s6
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX8DAGISEL-NEXT:  ; %bb.4: ; %endif
 ; GFX8DAGISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
 ; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s0
@@ -2234,9 +2156,7 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX8GISEL-NEXT:    s_and_b32 s7, s6, 1
 ; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8GISEL-NEXT:    s_mul_i32 s6, s2, s7
-; GFX8GISEL-NEXT:    s_mul_i32 s3, s3, s7
-; GFX8GISEL-NEXT:    s_mul_hi_u32 s2, s2, s7
-; GFX8GISEL-NEXT:    s_add_u32 s7, s2, s3
+; GFX8GISEL-NEXT:    s_mul_i32 s7, s3, s7
 ; GFX8GISEL-NEXT:  .LBB9_2: ; %Flow
 ; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8GISEL-NEXT:    s_andn2_saveexec_b64 s[2:3], s[8:9]
@@ -2248,9 +2168,7 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX8GISEL-NEXT:    s_and_b32 s7, s6, 1
 ; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8GISEL-NEXT:    s_mul_i32 s6, s4, s7
-; GFX8GISEL-NEXT:    s_mul_i32 s5, s5, s7
-; GFX8GISEL-NEXT:    s_mul_hi_u32 s4, s4, s7
-; GFX8GISEL-NEXT:    s_add_u32 s7, s4, s5
+; GFX8GISEL-NEXT:    s_mul_i32 s7, s5, s7
 ; GFX8GISEL-NEXT:  .LBB9_4: ; %endif
 ; GFX8GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
 ; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s6
@@ -2275,9 +2193,7 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX9DAGISEL-NEXT:    s_and_b32 s5, s4, 1
 ; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9DAGISEL-NEXT:    s_mul_i32 s4, s2, s5
-; GFX9DAGISEL-NEXT:    s_mul_i32 s3, s3, s5
-; GFX9DAGISEL-NEXT:    s_mul_hi_u32 s2, s2, s5
-; GFX9DAGISEL-NEXT:    s_add_u32 s5, s2, s3
+; GFX9DAGISEL-NEXT:    s_mul_i32 s5, s3, s5
 ; GFX9DAGISEL-NEXT:  .LBB9_2: ; %Flow
 ; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9DAGISEL-NEXT:    s_or_saveexec_b64 s[2:3], s[8:9]
@@ -2289,9 +2205,7 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX9DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
 ; GFX9DAGISEL-NEXT:    s_and_b32 s5, s4, 1
 ; GFX9DAGISEL-NEXT:    s_mul_i32 s4, s6, s5
-; GFX9DAGISEL-NEXT:    s_mul_i32 s7, s7, s5
-; GFX9DAGISEL-NEXT:    s_mul_hi_u32 s5, s6, s5
-; GFX9DAGISEL-NEXT:    s_add_u32 s5, s5, s7
+; GFX9DAGISEL-NEXT:    s_mul_i32 s5, s7, s5
 ; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX9DAGISEL-NEXT:  ; %bb.4: ; %endif
@@ -2314,23 +2228,19 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX9GISEL-NEXT:    s_and_b32 s7, s6, 1
 ; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9GISEL-NEXT:    s_mul_i32 s6, s2, s7
-; GFX9GISEL-NEXT:    s_mul_i32 s3, s3, s7
-; GFX9GISEL-NEXT:    s_mul_hi_u32 s2, s2, s7
-; GFX9GISEL-NEXT:    s_add_u32 s7, s2, s3
+; GFX9GISEL-NEXT:    s_mul_i32 s7, s3, s7
 ; GFX9GISEL-NEXT:  .LBB9_2: ; %Flow
 ; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9GISEL-NEXT:    s_andn2_saveexec_b64 s[2:3], s[8:9]
 ; GFX9GISEL-NEXT:    s_cbranch_execz .LBB9_4
 ; GFX9GISEL-NEXT:  ; %bb.3: ; %if
-; GFX9GISEL-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x34
+; GFX9GISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
 ; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], exec
 ; GFX9GISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
 ; GFX9GISEL-NEXT:    s_and_b32 s4, s4, 1
 ; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9GISEL-NEXT:    s_mul_i32 s6, s8, s4
-; GFX9GISEL-NEXT:    s_mul_i32 s5, s9, s4
-; GFX9GISEL-NEXT:    s_mul_hi_u32 s4, s8, s4
-; GFX9GISEL-NEXT:    s_add_u32 s7, s4, s5
+; GFX9GISEL-NEXT:    s_mul_i32 s6, s6, s4
+; GFX9GISEL-NEXT:    s_mul_i32 s7, s7, s4
 ; GFX9GISEL-NEXT:  .LBB9_4: ; %endif
 ; GFX9GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
 ; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s6
@@ -2345,33 +2255,29 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1064DAGISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; GFX1064DAGISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
 ; GFX1064DAGISEL-NEXT:    v_cmp_lt_u32_e32 vcc, 15, v0
-; GFX1064DAGISEL-NEXT:    ; implicit-def: $sgpr8_sgpr9
-; GFX1064DAGISEL-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GFX1064DAGISEL-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
+; GFX1064DAGISEL-NEXT:    ; implicit-def: $sgpr4_sgpr5
+; GFX1064DAGISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX1064DAGISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
 ; GFX1064DAGISEL-NEXT:    s_cbranch_execz .LBB9_2
 ; GFX1064DAGISEL-NEXT:  ; %bb.1: ; %else
-; GFX1064DAGISEL-NEXT:    s_mov_b64 s[8:9], exec
-; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s8, s[8:9]
-; GFX1064DAGISEL-NEXT:    s_and_b32 s8, s8, 1
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064DAGISEL-NEXT:    s_and_b32 s5, s4, 1
 ; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1064DAGISEL-NEXT:    s_mul_i32 s3, s3, s8
-; GFX1064DAGISEL-NEXT:    s_mul_hi_u32 s9, s2, s8
-; GFX1064DAGISEL-NEXT:    s_mul_i32 s8, s2, s8
-; GFX1064DAGISEL-NEXT:    s_add_u32 s9, s9, s3
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s4, s2, s5
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s5, s3, s5
 ; GFX1064DAGISEL-NEXT:  .LBB9_2: ; %Flow
 ; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1064DAGISEL-NEXT:    s_or_saveexec_b64 s[2:3], s[4:5]
-; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s8
-; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s9
+; GFX1064DAGISEL-NEXT:    s_or_saveexec_b64 s[2:3], s[8:9]
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX1064DAGISEL-NEXT:    s_xor_b64 exec, exec, s[2:3]
 ; GFX1064DAGISEL-NEXT:  ; %bb.3: ; %if
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], exec
 ; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
-; GFX1064DAGISEL-NEXT:    s_and_b32 s4, s4, 1
-; GFX1064DAGISEL-NEXT:    s_mul_i32 s5, s7, s4
-; GFX1064DAGISEL-NEXT:    s_mul_hi_u32 s7, s6, s4
-; GFX1064DAGISEL-NEXT:    s_mul_i32 s4, s6, s4
-; GFX1064DAGISEL-NEXT:    s_add_u32 s5, s7, s5
+; GFX1064DAGISEL-NEXT:    s_and_b32 s5, s4, 1
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s4, s6, s5
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s5, s7, s5
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX1064DAGISEL-NEXT:  ; %bb.4: ; %endif
@@ -2391,12 +2297,10 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1064GISEL-NEXT:  ; %bb.1: ; %else
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX1064GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
-; GFX1064GISEL-NEXT:    s_and_b32 s6, s6, 1
+; GFX1064GISEL-NEXT:    s_and_b32 s7, s6, 1
 ; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1064GISEL-NEXT:    s_mul_i32 s3, s3, s6
-; GFX1064GISEL-NEXT:    s_mul_hi_u32 s7, s2, s6
-; GFX1064GISEL-NEXT:    s_mul_i32 s6, s2, s6
-; GFX1064GISEL-NEXT:    s_add_u32 s7, s7, s3
+; GFX1064GISEL-NEXT:    s_mul_i32 s6, s2, s7
+; GFX1064GISEL-NEXT:    s_mul_i32 s7, s3, s7
 ; GFX1064GISEL-NEXT:  .LBB9_2: ; %Flow
 ; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1064GISEL-NEXT:    s_andn2_saveexec_b64 s[2:3], s[8:9]
@@ -2407,10 +2311,8 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1064GISEL-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
 ; GFX1064GISEL-NEXT:    s_and_b32 s4, s4, 1
 ; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1064GISEL-NEXT:    s_mul_i32 s5, s7, s4
-; GFX1064GISEL-NEXT:    s_mul_hi_u32 s7, s6, s4
 ; GFX1064GISEL-NEXT:    s_mul_i32 s6, s6, s4
-; GFX1064GISEL-NEXT:    s_add_u32 s7, s7, s5
+; GFX1064GISEL-NEXT:    s_mul_i32 s7, s7, s4
 ; GFX1064GISEL-NEXT:  .LBB9_4: ; %endif
 ; GFX1064GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
 ; GFX1064GISEL-NEXT:    v_mov_b32_e32 v0, s6
@@ -2432,12 +2334,10 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1032DAGISEL-NEXT:  ; %bb.1: ; %else
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, exec_lo
 ; GFX1032DAGISEL-NEXT:    s_bcnt1_i32_b32 s4, s4
-; GFX1032DAGISEL-NEXT:    s_and_b32 s4, s4, 1
+; GFX1032DAGISEL-NEXT:    s_and_b32 s5, s4, 1
 ; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1032DAGISEL-NEXT:    s_mul_i32 s3, s3, s4
-; GFX1032DAGISEL-NEXT:    s_mul_hi_u32 s5, s2, s4
-; GFX1032DAGISEL-NEXT:    s_mul_i32 s4, s2, s4
-; GFX1032DAGISEL-NEXT:    s_add_u32 s5, s5, s3
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s4, s2, s5
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s5, s3, s5
 ; GFX1032DAGISEL-NEXT:  .LBB9_2: ; %Flow
 ; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1032DAGISEL-NEXT:    s_or_saveexec_b32 s2, s8
@@ -2448,10 +2348,8 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s3, exec_lo
 ; GFX1032DAGISEL-NEXT:    s_bcnt1_i32_b32 s3, s3
 ; GFX1032DAGISEL-NEXT:    s_and_b32 s3, s3, 1
-; GFX1032DAGISEL-NEXT:    s_mul_i32 s5, s7, s3
-; GFX1032DAGISEL-NEXT:    s_mul_hi_u32 s7, s6, s3
 ; GFX1032DAGISEL-NEXT:    s_mul_i32 s4, s6, s3
-; GFX1032DAGISEL-NEXT:    s_add_u32 s5, s7, s5
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s5, s7, s3
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX1032DAGISEL-NEXT:  ; %bb.4: ; %endif
@@ -2471,12 +2369,10 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1032GISEL-NEXT:  ; %bb.1: ; %else
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
 ; GFX1032GISEL-NEXT:    s_bcnt1_i32_b32 s6, s6
-; GFX1032GISEL-NEXT:    s_and_b32 s6, s6, 1
+; GFX1032GISEL-NEXT:    s_and_b32 s7, s6, 1
 ; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1032GISEL-NEXT:    s_mul_i32 s3, s3, s6
-; GFX1032GISEL-NEXT:    s_mul_hi_u32 s7, s2, s6
-; GFX1032GISEL-NEXT:    s_mul_i32 s6, s2, s6
-; GFX1032GISEL-NEXT:    s_add_u32 s7, s7, s3
+; GFX1032GISEL-NEXT:    s_mul_i32 s6, s2, s7
+; GFX1032GISEL-NEXT:    s_mul_i32 s7, s3, s7
 ; GFX1032GISEL-NEXT:  .LBB9_2: ; %Flow
 ; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1032GISEL-NEXT:    s_andn2_saveexec_b32 s2, s8
@@ -2487,10 +2383,8 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1032GISEL-NEXT:    s_bcnt1_i32_b32 s3, s3
 ; GFX1032GISEL-NEXT:    s_and_b32 s3, s3, 1
 ; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1032GISEL-NEXT:    s_mul_i32 s4, s7, s3
-; GFX1032GISEL-NEXT:    s_mul_hi_u32 s5, s6, s3
 ; GFX1032GISEL-NEXT:    s_mul_i32 s6, s6, s3
-; GFX1032GISEL-NEXT:    s_add_u32 s7, s5, s4
+; GFX1032GISEL-NEXT:    s_mul_i32 s7, s7, s3
 ; GFX1032GISEL-NEXT:  .LBB9_4: ; %endif
 ; GFX1032GISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
 ; GFX1032GISEL-NEXT:    v_mov_b32_e32 v0, s6
@@ -2505,27 +2399,25 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1164DAGISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
 ; GFX1164DAGISEL-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
 ; GFX1164DAGISEL-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
-; GFX1164DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1164DAGISEL-NEXT:    ; implicit-def: $sgpr8_sgpr9
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[8:9], exec
+; GFX1164DAGISEL-NEXT:    ; implicit-def: $sgpr6_sgpr7
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX1164DAGISEL-NEXT:    v_cmpx_lt_u32_e32 15, v0
-; GFX1164DAGISEL-NEXT:    s_xor_b64 s[6:7], exec, s[6:7]
+; GFX1164DAGISEL-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
 ; GFX1164DAGISEL-NEXT:    s_cbranch_execz .LBB9_2
 ; GFX1164DAGISEL-NEXT:  ; %bb.1: ; %else
-; GFX1164DAGISEL-NEXT:    s_mov_b64 s[8:9], exec
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1164DAGISEL-NEXT:    s_bcnt1_i32_b64 s8, s[8:9]
-; GFX1164DAGISEL-NEXT:    s_and_b32 s8, s8, 1
+; GFX1164DAGISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164DAGISEL-NEXT:    s_and_b32 s7, s6, 1
 ; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1164DAGISEL-NEXT:    s_mul_i32 s3, s3, s8
-; GFX1164DAGISEL-NEXT:    s_mul_hi_u32 s9, s2, s8
-; GFX1164DAGISEL-NEXT:    s_mul_i32 s8, s2, s8
-; GFX1164DAGISEL-NEXT:    s_add_u32 s9, s9, s3
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s6, s2, s7
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s7, s3, s7
 ; GFX1164DAGISEL-NEXT:  .LBB9_2: ; %Flow
 ; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1164DAGISEL-NEXT:    s_or_saveexec_b64 s[2:3], s[6:7]
-; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s8
-; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s9
+; GFX1164DAGISEL-NEXT:    s_or_saveexec_b64 s[2:3], s[8:9]
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX1164DAGISEL-NEXT:    s_xor_b64 exec, exec, s[2:3]
 ; GFX1164DAGISEL-NEXT:  ; %bb.3: ; %if
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -2533,10 +2425,8 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1164DAGISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164DAGISEL-NEXT:    s_and_b32 s6, s6, 1
-; GFX1164DAGISEL-NEXT:    s_mul_i32 s5, s5, s6
-; GFX1164DAGISEL-NEXT:    s_mul_hi_u32 s7, s4, s6
 ; GFX1164DAGISEL-NEXT:    s_mul_i32 s4, s4, s6
-; GFX1164DAGISEL-NEXT:    s_add_u32 s5, s7, s5
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s5, s5, s6
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX1164DAGISEL-NEXT:  ; %bb.4: ; %endif
@@ -2559,12 +2449,10 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
-; GFX1164GISEL-NEXT:    s_and_b32 s6, s6, 1
+; GFX1164GISEL-NEXT:    s_and_b32 s7, s6, 1
 ; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1164GISEL-NEXT:    s_mul_i32 s3, s3, s6
-; GFX1164GISEL-NEXT:    s_mul_hi_u32 s7, s2, s6
-; GFX1164GISEL-NEXT:    s_mul_i32 s6, s2, s6
-; GFX1164GISEL-NEXT:    s_add_u32 s7, s7, s3
+; GFX1164GISEL-NEXT:    s_mul_i32 s6, s2, s7
+; GFX1164GISEL-NEXT:    s_mul_i32 s7, s3, s7
 ; GFX1164GISEL-NEXT:  .LBB9_2: ; %Flow
 ; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1164GISEL-NEXT:    s_and_not1_saveexec_b64 s[2:3], s[8:9]
@@ -2574,12 +2462,10 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164GISEL-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
-; GFX1164GISEL-NEXT:    s_and_b32 s6, s6, 1
+; GFX1164GISEL-NEXT:    s_and_b32 s7, s6, 1
 ; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1164GISEL-NEXT:    s_mul_i32 s5, s5, s6
-; GFX1164GISEL-NEXT:    s_mul_hi_u32 s7, s4, s6
-; GFX1164GISEL-NEXT:    s_mul_i32 s6, s4, s6
-; GFX1164GISEL-NEXT:    s_add_u32 s7, s7, s5
+; GFX1164GISEL-NEXT:    s_mul_i32 s6, s4, s7
+; GFX1164GISEL-NEXT:    s_mul_i32 s7, s5, s7
 ; GFX1164GISEL-NEXT:  .LBB9_4: ; %endif
 ; GFX1164GISEL-NEXT:    s_or_b64 exec, exec, s[2:3]
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s6
@@ -2604,12 +2490,10 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132DAGISEL-NEXT:    s_bcnt1_i32_b32 s6, s6
-; GFX1132DAGISEL-NEXT:    s_and_b32 s6, s6, 1
+; GFX1132DAGISEL-NEXT:    s_and_b32 s7, s6, 1
 ; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1132DAGISEL-NEXT:    s_mul_i32 s3, s3, s6
-; GFX1132DAGISEL-NEXT:    s_mul_hi_u32 s7, s2, s6
-; GFX1132DAGISEL-NEXT:    s_mul_i32 s6, s2, s6
-; GFX1132DAGISEL-NEXT:    s_add_u32 s7, s7, s3
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s6, s2, s7
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s7, s3, s7
 ; GFX1132DAGISEL-NEXT:  .LBB9_2: ; %Flow
 ; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1132DAGISEL-NEXT:    s_or_saveexec_b32 s2, s8
@@ -2621,10 +2505,8 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1132DAGISEL-NEXT:    s_bcnt1_i32_b32 s3, s3
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132DAGISEL-NEXT:    s_and_b32 s3, s3, 1
-; GFX1132DAGISEL-NEXT:    s_mul_i32 s5, s5, s3
-; GFX1132DAGISEL-NEXT:    s_mul_hi_u32 s6, s4, s3
 ; GFX1132DAGISEL-NEXT:    s_mul_i32 s4, s4, s3
-; GFX1132DAGISEL-NEXT:    s_add_u32 s5, s6, s5
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s5, s5, s3
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
 ; GFX1132DAGISEL-NEXT:  ; %bb.4: ; %endif
@@ -2647,12 +2529,10 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1132GISEL-NEXT:    s_mov_b32 s6, exec_lo
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132GISEL-NEXT:    s_bcnt1_i32_b32 s6, s6
-; GFX1132GISEL-NEXT:    s_and_b32 s6, s6, 1
+; GFX1132GISEL-NEXT:    s_and_b32 s7, s6, 1
 ; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1132GISEL-NEXT:    s_mul_i32 s3, s3, s6
-; GFX1132GISEL-NEXT:    s_mul_hi_u32 s7, s2, s6
-; GFX1132GISEL-NEXT:    s_mul_i32 s6, s2, s6
-; GFX1132GISEL-NEXT:    s_add_u32 s7, s7, s3
+; GFX1132GISEL-NEXT:    s_mul_i32 s6, s2, s7
+; GFX1132GISEL-NEXT:    s_mul_i32 s7, s3, s7
 ; GFX1132GISEL-NEXT:  .LBB9_2: ; %Flow
 ; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1132GISEL-NEXT:    s_and_not1_saveexec_b32 s2, s8
@@ -2664,10 +2544,8 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1132GISEL-NEXT:    s_bcnt1_i32_b32 s3, s3
 ; GFX1132GISEL-NEXT:    s_and_b32 s3, s3, 1
 ; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1132GISEL-NEXT:    s_mul_i32 s5, s5, s3
-; GFX1132GISEL-NEXT:    s_mul_hi_u32 s7, s4, s3
 ; GFX1132GISEL-NEXT:    s_mul_i32 s6, s4, s3
-; GFX1132GISEL-NEXT:    s_add_u32 s7, s7, s5
+; GFX1132GISEL-NEXT:    s_mul_i32 s7, s5, s3
 ; GFX1132GISEL-NEXT:  .LBB9_4: ; %endif
 ; GFX1132GISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s2
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
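
A note on why the updated CHECK lines above can drop the s_mul_hi_u32/s_add_u32 pair: in every one of these uniform-value paths the 64-bit input is multiplied by a parity bit (s_bcnt1 of exec masked with 1), which is always 0 or 1, so the low-half multiply can never carry into the high half. Below is a minimal standalone sketch of that equivalence; it is plain C++ for illustration only (function names are made up), not LLVM code.

#include <cassert>
#include <cstdint>

// Old shape: full 64x32 multiply split into low/high parts.
static uint64_t mulByParityOld(uint64_t Val, uint32_t Parity) {
  uint32_t Lo = static_cast<uint32_t>(Val);
  uint32_t Hi = static_cast<uint32_t>(Val >> 32);
  uint32_t ResLo = Lo * Parity;                                          // s_mul_i32
  uint32_t Carry = static_cast<uint32_t>((uint64_t(Lo) * Parity) >> 32); // s_mul_hi_u32
  uint32_t ResHi = Hi * Parity + Carry;                                  // s_mul_i32 + s_add_u32
  return (uint64_t(ResHi) << 32) | ResLo;
}

// New shape: each half is multiplied by the 0/1 parity bit independently.
static uint64_t mulByParityNew(uint64_t Val, uint32_t Parity) {
  uint32_t Lo = static_cast<uint32_t>(Val);
  uint32_t Hi = static_cast<uint32_t>(Val >> 32);
  return (uint64_t(Hi * Parity) << 32) | (Lo * Parity);
}

int main() {
  for (uint64_t V : {0x0ULL, 0x7bULL, 0x123456789abcdef0ULL, ~0ULL})
    for (uint32_t P : {0u, 1u}) // the only values a parity bit can take
      assert(mulByParityOld(V, P) == mulByParityNew(V, P));
  return 0;
}

Both shapes agree for every parity value the lowering can produce, which is what lets the tests lose the extra instructions.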

>From 4565c046b6baad67dcf4bc870535c949387cfdbb Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Thu, 7 Aug 2025 11:38:46 +0530
Subject: [PATCH 09/12] Removing `break` before `else`

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 4500c05fd2b73..d093d533c2052 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5463,7 +5463,6 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
               BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DstReg)
                   .addReg(SrcReg)
                   .addReg(ParityRegister);
-              break;
             } else {
               Register DestSub0 =
                   MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
@@ -5492,8 +5491,8 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
                   .addImm(AMDGPU::sub0)
                   .addReg(DestSub1)
                   .addImm(AMDGPU::sub1);
-              break;
             }
+            break;
           }
       case AMDGPU::S_SUB_I32: {
         Register NegatedVal = MRI.createVirtualRegister(DstRegClass);

>From 84a7b6a77e2a5930e2f88805a5a36038edebf625 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Thu, 7 Aug 2025 11:55:38 +0530
Subject: [PATCH 10/12] Running Clang Format

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 98 +++++++++++------------
 1 file changed, 49 insertions(+), 49 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index d093d533c2052..df533a4bae765 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5445,55 +5445,55 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
           BuildMI(BB, MI, DL, TII->get(BitCountOpc), NumActiveLanes)
               .addReg(ExecMask);
 
-          switch (Opc) {
-          case AMDGPU::S_XOR_B32:
-          case AMDGPU::S_XOR_B64: {
-            // Performing an XOR operation on a uniform value
-            // depends on the parity of the number of active lanes.
-            // For even parity, the result will be 0, for odd
-            // parity the result will be the same as the input value.
-            Register ParityRegister =
-                MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
-
-            BuildMI(BB, MI, DL, TII->get(AMDGPU::S_AND_B32), ParityRegister)
-                .addReg(NewAccumulator->getOperand(0).getReg())
-                .addImm(1)
-                .setOperandDead(3); // Dead scc
-            if (is32BitOpc) {
-              BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DstReg)
-                  .addReg(SrcReg)
-                  .addReg(ParityRegister);
-            } else {
-              Register DestSub0 =
-                  MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
-              Register DestSub1 =
-                  MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
-
-              const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
-              const TargetRegisterClass *SrcSubRC =
-                  TRI->getSubRegisterClass(SrcRC, AMDGPU::sub0);
-
-              MachineOperand Op1L = TII->buildExtractSubRegOrImm(
-                  MI, MRI, MI.getOperand(1), SrcRC, AMDGPU::sub0, SrcSubRC);
-              MachineOperand Op1H = TII->buildExtractSubRegOrImm(
-                  MI, MRI, MI.getOperand(1), SrcRC, AMDGPU::sub1, SrcSubRC);
-
-              BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DestSub0)
-                  .add(Op1L)
-                  .addReg(ParityRegister);
-
-              BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DestSub1)
-                  .add(Op1H)
-                  .addReg(ParityRegister);
-
-              BuildMI(BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), DstReg)
-                  .addReg(DestSub0)
-                  .addImm(AMDGPU::sub0)
-                  .addReg(DestSub1)
-                  .addImm(AMDGPU::sub1);
-            }
-            break;
-          }
+      switch (Opc) {
+      case AMDGPU::S_XOR_B32:
+      case AMDGPU::S_XOR_B64: {
+        // Performing an XOR operation on a uniform value
+        // depends on the parity of the number of active lanes.
+        // For even parity, the result will be 0, for odd
+        // parity the result will be the same as the input value.
+        Register ParityRegister =
+            MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+
+        BuildMI(BB, MI, DL, TII->get(AMDGPU::S_AND_B32), ParityRegister)
+            .addReg(NewAccumulator->getOperand(0).getReg())
+            .addImm(1)
+            .setOperandDead(3); // Dead scc
+        if (is32BitOpc) {
+          BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DstReg)
+              .addReg(SrcReg)
+              .addReg(ParityRegister);
+        } else {
+          Register DestSub0 =
+              MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+          Register DestSub1 =
+              MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+
+          const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
+          const TargetRegisterClass *SrcSubRC =
+              TRI->getSubRegisterClass(SrcRC, AMDGPU::sub0);
+
+          MachineOperand Op1L = TII->buildExtractSubRegOrImm(
+              MI, MRI, MI.getOperand(1), SrcRC, AMDGPU::sub0, SrcSubRC);
+          MachineOperand Op1H = TII->buildExtractSubRegOrImm(
+              MI, MRI, MI.getOperand(1), SrcRC, AMDGPU::sub1, SrcSubRC);
+
+          BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DestSub0)
+              .add(Op1L)
+              .addReg(ParityRegister);
+
+          BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DestSub1)
+              .add(Op1H)
+              .addReg(ParityRegister);
+
+          BuildMI(BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), DstReg)
+              .addReg(DestSub0)
+              .addImm(AMDGPU::sub0)
+              .addReg(DestSub1)
+              .addImm(AMDGPU::sub1);
+        }
+        break;
+      }
       case AMDGPU::S_SUB_I32: {
         Register NegatedVal = MRI.createVirtualRegister(DstRegClass);
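
As the comment in the re-indented block notes, XOR-reducing a uniform value depends only on the parity of the active-lane count: an even count cancels to zero, an odd count leaves the value unchanged, which is why the lowering multiplies by the bit derived from the lane count. A small standalone illustration of that identity follows; it is plain C++ with made-up names, not the actual lowering.

#include <cassert>
#include <cstdint>

// Reference: fold the same value into the XOR identity once per active lane.
static uint64_t xorReduceRef(uint64_t X, unsigned ActiveLanes) {
  uint64_t Acc = 0;
  for (unsigned I = 0; I < ActiveLanes; ++I)
    Acc ^= X;
  return Acc;
}

// What the uniform-value path computes: the value times the parity bit.
static uint64_t xorReduceParity(uint64_t X, unsigned ActiveLanes) {
  return X * static_cast<uint64_t>(ActiveLanes & 1);
}

int main() {
  for (unsigned N = 0; N <= 64; ++N)
    assert(xorReduceRef(0x123456789abcdef0ULL, N) ==
           xorReduceParity(0x123456789abcdef0ULL, N));
  return 0;
}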
 

>From 5dc17b147b5d54ff15292dd30141a40adba56c0c Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Wed, 13 Aug 2025 11:50:32 +0530
Subject: [PATCH 11/12] Directly checking for S_XOR_B32

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index df533a4bae765..dc27b63afdb64 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5395,18 +5395,18 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
     case AMDGPU::S_MAX_U32:
     case AMDGPU::S_MAX_I32:
     case AMDGPU::S_AND_B32:
-    case AMDGPU::S_AND_B64:
-    case AMDGPU::S_OR_B32:
-    case AMDGPU::S_OR_B64: {
+    case AMDGPU::S_OR_B32: {
       // Idempotent operations.
       BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B32), DstReg).addReg(SrcReg);
       RetBB = &BB;
       break;
     }
-    case AMDGPU::V_CMP_LT_U64_e64:   // umin
-    case AMDGPU::V_CMP_LT_I64_e64:   // min
-    case AMDGPU::V_CMP_GT_U64_e64:   // umax
-    case AMDGPU::V_CMP_GT_I64_e64: { // max
+    case AMDGPU::V_CMP_LT_U64_e64: // umin
+    case AMDGPU::V_CMP_LT_I64_e64: // min
+    case AMDGPU::V_CMP_GT_U64_e64: // umax
+    case AMDGPU::V_CMP_GT_I64_e64: // max
+    case AMDGPU::S_AND_B64:
+    case AMDGPU::S_OR_B64: {
       // Idempotent operations.
       BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B64), DstReg).addReg(SrcReg);
       RetBB = &BB;
@@ -5459,7 +5459,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
             .addReg(NewAccumulator->getOperand(0).getReg())
             .addImm(1)
             .setOperandDead(3); // Dead scc
-        if (is32BitOpc) {
+        if (Opc == AMDGPU::S_XOR_B32) {
           BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DstReg)
               .addReg(SrcReg)
               .addReg(ParityRegister);
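
The regrouping above leans on the same fact for both widths: min/max/and/or over a uniform input are idempotent, so reducing any non-zero number of active lanes returns the input unchanged and a single S_MOV_B32 or S_MOV_B64 copy is enough; checking `Opc == AMDGPU::S_XOR_B32` then picks the 32-bit XOR path without consulting the register width. A standalone check of the idempotence claim, as a plain C++ sketch with illustrative names only:

#include <algorithm>
#include <cassert>
#include <cstdint>

// Fold a uniform value with itself Lanes-1 times and confirm nothing changes.
template <typename Fold>
static uint64_t reduceUniform(uint64_t X, unsigned Lanes, Fold F) {
  uint64_t Acc = X;
  for (unsigned I = 1; I < Lanes; ++I)
    Acc = F(Acc, X);
  return Acc;
}

int main() {
  const uint64_t X = 0xdeadbeefcafef00dULL;
  for (unsigned Lanes = 1; Lanes <= 64; ++Lanes) {
    assert(reduceUniform(X, Lanes, [](uint64_t A, uint64_t B) { return std::min(A, B); }) == X);
    assert(reduceUniform(X, Lanes, [](uint64_t A, uint64_t B) { return std::max(A, B); }) == X);
    assert(reduceUniform(X, Lanes, [](uint64_t A, uint64_t B) { return A & B; }) == X);
    assert(reduceUniform(X, Lanes, [](uint64_t A, uint64_t B) { return A | B; }) == X);
  }
  return 0;
}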

>From ac6e7ceedbdd4601a88885d6c72bbf405d6da5ce Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Wed, 13 Aug 2025 16:37:00 +0530
Subject: [PATCH 12/12] Code Formatting

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  22 +-
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll  | 197 ------------
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.and.ll  |  44 +--
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll  | 188 -----------
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll  | 188 -----------
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.or.ll   |  44 +--
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll  | 197 ------------
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll | 296 ------------------
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll | 172 ----------
 .../CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll  |  44 +--
 10 files changed, 55 insertions(+), 1337 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index dc27b63afdb64..2a977247bc2cb 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5334,12 +5334,9 @@ static uint32_t getIdentityValueFor32BitWaveReduction(unsigned Opc) {
   case AMDGPU::S_ADD_I32:
   case AMDGPU::S_SUB_I32:
   case AMDGPU::S_OR_B32:
-  case AMDGPU::S_OR_B64:
   case AMDGPU::S_XOR_B32:
-  case AMDGPU::S_XOR_B64:
     return std::numeric_limits<uint32_t>::min();
   case AMDGPU::S_AND_B32:
-  case AMDGPU::S_AND_B64:
     return std::numeric_limits<uint32_t>::max();
   default:
     llvm_unreachable(
@@ -5359,7 +5356,11 @@ static uint64_t getIdentityValueFor64BitWaveReduction(unsigned Opc) {
     return std::numeric_limits<int64_t>::min();
   case AMDGPU::S_ADD_U64_PSEUDO:
   case AMDGPU::S_SUB_U64_PSEUDO:
+  case AMDGPU::S_OR_B64:
+  case AMDGPU::S_XOR_B64:
     return std::numeric_limits<uint64_t>::min();
+  case AMDGPU::S_AND_B64:
+    return std::numeric_limits<uint64_t>::max();
   default:
     llvm_unreachable(
         "Unexpected opcode in getIdentityValueFor64BitWaveReduction");
@@ -5412,15 +5413,6 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
       RetBB = &BB;
       break;
     }
-    case AMDGPU::V_CMP_LT_U64_e64:   // umin
-    case AMDGPU::V_CMP_LT_I64_e64:   // min
-    case AMDGPU::V_CMP_GT_U64_e64:   // umax
-    case AMDGPU::V_CMP_GT_I64_e64: { // max
-      // Idempotent operations.
-      BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B64), DstReg).addReg(SrcReg);
-      RetBB = &BB;
-      break;
-    }
     case AMDGPU::S_XOR_B32:
     case AMDGPU::S_XOR_B64:
     case AMDGPU::S_ADD_I32:
@@ -5689,9 +5681,9 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
                            .addReg(LaneValueHiReg)
                            .addImm(AMDGPU::sub1);
       switch (Opc) {
-      case ::AMDGPU::S_OR_B64:
-      case ::AMDGPU::S_AND_B64:
-      case ::AMDGPU::S_XOR_B64: {
+      case AMDGPU::S_OR_B64:
+      case AMDGPU::S_AND_B64:
+      case AMDGPU::S_XOR_B64: {
         NewAccumulator = BuildMI(*ComputeLoop, I, DL, TII->get(Opc), DstReg)
                              .addReg(Accumulator->getOperand(0).getReg())
                              .addReg(LaneValue->getOperand(0).getReg())
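
For the getIdentityValueFor64BitWaveReduction hunk above: S_OR_B64 and S_XOR_B64 join the zero identity and S_AND_B64 gets the all-ones identity, the 64-bit analogues of the existing 32-bit entries. A short standalone check of those identities (plain C++, not the LLVM helper; illustrative only):

#include <cassert>
#include <cstdint>
#include <limits>

int main() {
  const uint64_t X = 0x0123456789abcdefULL;
  const uint64_t OrXorIdentity = std::numeric_limits<uint64_t>::min(); // 0
  const uint64_t AndIdentity = std::numeric_limits<uint64_t>::max();   // ~0
  assert((X | OrXorIdentity) == X);
  assert((X ^ OrXorIdentity) == X);
  assert((X & AndIdentity) == X);
  return 0;
}
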
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
index ac84c71f5b103..5f303e30c923b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
@@ -1523,10 +1523,6 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: uniform_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -1543,11 +1539,6 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
-<<<<<<< HEAD
-=======
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 %in, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -1750,10 +1741,6 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: const_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -1770,11 +1757,6 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX12DAGISEL-NEXT:    s_wait_kmcnt 0x0
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
-<<<<<<< HEAD
-=======
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 123, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -1975,10 +1957,6 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: poison_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -1995,11 +1973,6 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
-<<<<<<< HEAD
-=======
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 poison, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -2010,19 +1983,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-LABEL: divergent_value_i64:
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
 ; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX8DAGISEL-NEXT:    s_mov_b32 s4, 0
-; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX8DAGISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
-; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX8DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2042,19 +2004,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL-LABEL: divergent_value_i64:
 ; GFX8GISEL:       ; %bb.0: ; %entry
 ; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX8GISEL-NEXT:    s_mov_b32 s4, 0
-; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX8GISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
 ; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX8GISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2074,19 +2025,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL-LABEL: divergent_value_i64:
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
 ; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX9DAGISEL-NEXT:    s_mov_b32 s4, 0
-; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX9DAGISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
-; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX9DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2106,19 +2046,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL-LABEL: divergent_value_i64:
 ; GFX9GISEL:       ; %bb.0: ; %entry
 ; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX9GISEL-NEXT:    s_mov_b32 s4, 0
-; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX9GISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
 ; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX9GISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2138,19 +2067,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL-LABEL: divergent_value_i64:
 ; GFX1064DAGISEL:       ; %bb.0: ; %entry
 ; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, 0
-; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1064DAGISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
-; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX1064DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2169,19 +2087,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL-LABEL: divergent_value_i64:
 ; GFX1064GISEL:       ; %bb.0: ; %entry
 ; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX1064GISEL-NEXT:    s_mov_b32 s4, 0
-; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1064GISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX1064GISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2200,19 +2107,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL-LABEL: divergent_value_i64:
 ; GFX1032DAGISEL:       ; %bb.0: ; %entry
 ; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
-=======
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, 0
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
-; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032DAGISEL-NEXT:    v_readlane_b32 s8, v2, s7
@@ -2231,19 +2127,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL-LABEL: divergent_value_i64:
 ; GFX1032GISEL:       ; %bb.0: ; %entry
 ; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
-=======
-; GFX1032GISEL-NEXT:    s_mov_b32 s4, 0
-; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
-; GFX1032GISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
-; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032GISEL-NEXT:    v_readlane_b32 s8, v2, s7
@@ -2262,27 +2147,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL-LABEL: divergent_value_i64:
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
 ; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
-; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
-=======
-; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, 0
-=======
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
-<<<<<<< HEAD
-; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v2, s4
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s6, v3, s4
 ; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s4
@@ -2299,27 +2168,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL-LABEL: divergent_value_i64:
 ; GFX1164GISEL:       ; %bb.0: ; %entry
 ; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
-=======
-; GFX1164GISEL-NEXT:    s_mov_b32 s0, 0
-=======
-; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], 0
->>>>>>> 24ec1cb8e95b (Marking dead scc)
-; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
-<<<<<<< HEAD
-; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v2, s4
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s6, v3, s4
 ; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s4
@@ -2336,27 +2189,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL-LABEL: divergent_value_i64:
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
 ; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
-=======
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, 0
-=======
-; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
->>>>>>> 24ec1cb8e95b (Marking dead scc)
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
-<<<<<<< HEAD
-; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
 ; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -2372,27 +2209,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-LABEL: divergent_value_i64:
 ; GFX1132GISEL:       ; %bb.0: ; %entry
 ; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
-=======
-; GFX1132GISEL-NEXT:    s_mov_b32 s0, 0
-=======
-; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], 0
->>>>>>> 24ec1cb8e95b (Marking dead scc)
-; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
-<<<<<<< HEAD
-; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
 ; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -2404,10 +2225,6 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
 ; GFX1132GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX1132GISEL-NEXT:    s_setpc_b64 s[30:31]
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: divergent_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -2434,11 +2251,6 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
 ; GFX12DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX12DAGISEL-NEXT:    s_setpc_b64 s[30:31]
-<<<<<<< HEAD
-=======
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 %id.x, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -2916,10 +2728,6 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: divergent_cfg_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -2963,11 +2771,6 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX12DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
-<<<<<<< HEAD
-=======
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %d_cmp = icmp ult i32 %tid, 16
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.and.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.and.ll
index 55e6189f65675..08a978b0b34c1 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.and.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.and.ll
@@ -1269,9 +1269,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-LABEL: divergent_value_i64:
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
 ; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8DAGISEL-NEXT:    s_mov_b32 s4, -1
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX8DAGISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
 ; GFX8DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
@@ -1290,9 +1289,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL-LABEL: divergent_value_i64:
 ; GFX8GISEL:       ; %bb.0: ; %entry
 ; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8GISEL-NEXT:    s_mov_b32 s4, -1
+; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX8GISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8GISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
 ; GFX8GISEL-NEXT:    v_readlane_b32 s8, v2, s10
@@ -1311,9 +1309,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL-LABEL: divergent_value_i64:
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
 ; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9DAGISEL-NEXT:    s_mov_b32 s4, -1
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX9DAGISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
 ; GFX9DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
@@ -1332,9 +1329,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL-LABEL: divergent_value_i64:
 ; GFX9GISEL:       ; %bb.0: ; %entry
 ; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9GISEL-NEXT:    s_mov_b32 s4, -1
+; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX9GISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9GISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
 ; GFX9GISEL-NEXT:    v_readlane_b32 s8, v2, s10
@@ -1353,9 +1349,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL-LABEL: divergent_value_i64:
 ; GFX1064DAGISEL:       ; %bb.0: ; %entry
 ; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, -1
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1064DAGISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
 ; GFX1064DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
@@ -1373,9 +1368,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL-LABEL: divergent_value_i64:
 ; GFX1064GISEL:       ; %bb.0: ; %entry
 ; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1064GISEL-NEXT:    s_mov_b32 s4, -1
+; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1064GISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
 ; GFX1064GISEL-NEXT:    v_readlane_b32 s8, v2, s10
@@ -1393,9 +1387,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL-LABEL: divergent_value_i64:
 ; GFX1032DAGISEL:       ; %bb.0: ; %entry
 ; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, -1
+; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032DAGISEL-NEXT:    v_readlane_b32 s8, v2, s7
@@ -1413,9 +1406,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL-LABEL: divergent_value_i64:
 ; GFX1032GISEL:       ; %bb.0: ; %entry
 ; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1032GISEL-NEXT:    s_mov_b32 s4, -1
+; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
-; GFX1032GISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032GISEL-NEXT:    v_readlane_b32 s8, v2, s7
@@ -1433,12 +1425,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL-LABEL: divergent_value_i64:
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
 ; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], -1
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s6, s[2:3]
-; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s6
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s6
 ; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s6
@@ -1454,12 +1445,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL-LABEL: divergent_value_i64:
 ; GFX1164GISEL:       ; %bb.0: ; %entry
 ; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1164GISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], -1
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s6, s[2:3]
-; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s6
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s6
 ; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s6
@@ -1475,12 +1465,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL-LABEL: divergent_value_i64:
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
 ; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], -1
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
-; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
 ; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -1495,12 +1484,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-LABEL: divergent_value_i64:
 ; GFX1132GISEL:       ; %bb.0: ; %entry
 ; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1132GISEL-NEXT:    s_mov_b32 s0, -1
+; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], -1
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
-; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
 ; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
index f381a82660491..ace65a03a5abb 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
@@ -1269,21 +1269,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-LABEL: divergent_value_i64:
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
 ; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX8DAGISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX8DAGISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX8DAGISEL-NEXT:    s_brev_b32 s5, 1
-; GFX8DAGISEL-NEXT:    s_mov_b32 s4, 0
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX8DAGISEL-NEXT:    s_mov_b32 s4, 0
-; GFX8DAGISEL-NEXT:    s_brev_b32 s5, 1
-; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1306,21 +1294,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL-LABEL: divergent_value_i64:
 ; GFX8GISEL:       ; %bb.0: ; %entry
 ; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX8GISEL-NEXT:    s_mov_b32 s4, 0
-; GFX8GISEL-NEXT:    s_brev_b32 s5, 1
-; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX8GISEL-NEXT:    s_brev_b32 s5, 1
-; GFX8GISEL-NEXT:    s_mov_b32 s4, 0
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX8GISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX8GISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX8GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1343,21 +1319,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL-LABEL: divergent_value_i64:
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
 ; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX9DAGISEL-NEXT:    s_mov_b32 s4, 0
-; GFX9DAGISEL-NEXT:    s_brev_b32 s5, 1
-; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX9DAGISEL-NEXT:    s_brev_b32 s5, 1
-; GFX9DAGISEL-NEXT:    s_mov_b32 s4, 0
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX9DAGISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX9DAGISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1380,21 +1344,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL-LABEL: divergent_value_i64:
 ; GFX9GISEL:       ; %bb.0: ; %entry
 ; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX9GISEL-NEXT:    s_mov_b32 s4, 0
-; GFX9GISEL-NEXT:    s_brev_b32 s5, 1
-; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX9GISEL-NEXT:    s_brev_b32 s5, 1
-; GFX9GISEL-NEXT:    s_mov_b32 s4, 0
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX9GISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX9GISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX9GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1417,21 +1369,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL-LABEL: divergent_value_i64:
 ; GFX1064DAGISEL:       ; %bb.0: ; %entry
 ; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, 0
-; GFX1064DAGISEL-NEXT:    s_brev_b32 s5, 1
-; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1064DAGISEL-NEXT:    s_brev_b32 s5, 1
-; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, 0
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX1064DAGISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1453,21 +1393,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL-LABEL: divergent_value_i64:
 ; GFX1064GISEL:       ; %bb.0: ; %entry
 ; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX1064GISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX1064GISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1064GISEL-NEXT:    s_brev_b32 s5, 1
-; GFX1064GISEL-NEXT:    s_mov_b32 s4, 0
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX1064GISEL-NEXT:    s_mov_b32 s4, 0
-; GFX1064GISEL-NEXT:    s_brev_b32 s5, 1
-; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX1064GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1489,21 +1417,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL-LABEL: divergent_value_i64:
 ; GFX1032DAGISEL:       ; %bb.0: ; %entry
 ; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, 0
-; GFX1032DAGISEL-NEXT:    s_brev_b32 s5, 1
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
-=======
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
-; GFX1032DAGISEL-NEXT:    s_brev_b32 s5, 1
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, 0
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX1032DAGISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1525,21 +1441,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL-LABEL: divergent_value_i64:
 ; GFX1032GISEL:       ; %bb.0: ; %entry
 ; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1032GISEL-NEXT:    s_mov_b32 s4, 0
-; GFX1032GISEL-NEXT:    s_brev_b32 s5, 1
-; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
-=======
-; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
-; GFX1032GISEL-NEXT:    s_brev_b32 s5, 1
-; GFX1032GISEL-NEXT:    s_mov_b32 s4, 0
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX1032GISEL-NEXT:    s_mov_b32 s4, 0
 ; GFX1032GISEL-NEXT:    s_brev_b32 s5, 1
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1561,39 +1465,16 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL-LABEL: divergent_value_i64:
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
 ; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, 0
-; GFX1164DAGISEL-NEXT:    s_brev_b32 s1, 1
-; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
-=======
-; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT:    s_brev_b32 s1, 1
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, 0
 ; GFX1164DAGISEL-NEXT:    s_brev_b32 s1, 1
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s8
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s8
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164DAGISEL-NEXT:    v_cmp_gt_i64_e32 vcc, s[4:5], v[4:5]
 ; GFX1164DAGISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
 ; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s8
@@ -1609,39 +1490,16 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL-LABEL: divergent_value_i64:
 ; GFX1164GISEL:       ; %bb.0: ; %entry
 ; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1164GISEL-NEXT:    s_mov_b32 s0, 0
-; GFX1164GISEL-NEXT:    s_brev_b32 s1, 1
-; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
-=======
-; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT:    s_brev_b32 s1, 1
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164GISEL-NEXT:    s_mov_b32 s0, 0
 ; GFX1164GISEL-NEXT:    s_brev_b32 s1, 1
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s8
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s8
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164GISEL-NEXT:    v_cmp_gt_i64_e32 vcc, s[4:5], v[4:5]
 ; GFX1164GISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
 ; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s8
@@ -1657,38 +1515,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL-LABEL: divergent_value_i64:
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
 ; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, 0
-; GFX1132DAGISEL-NEXT:    s_brev_b32 s1, 1
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-=======
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT:    s_brev_b32 s1, 1
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, 0
 ; GFX1132DAGISEL-NEXT:    s_brev_b32 s1, 1
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
 ; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132DAGISEL-NEXT:    v_cmp_gt_i64_e32 vcc_lo, s[4:5], v[4:5]
 ; GFX1132DAGISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
 ; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -1703,38 +1538,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-LABEL: divergent_value_i64:
 ; GFX1132GISEL:       ; %bb.0: ; %entry
 ; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1132GISEL-NEXT:    s_mov_b32 s0, 0
-; GFX1132GISEL-NEXT:    s_brev_b32 s1, 1
-; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-=======
-; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT:    s_brev_b32 s1, 1
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132GISEL-NEXT:    s_mov_b32 s0, 0
 ; GFX1132GISEL-NEXT:    s_brev_b32 s1, 1
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132GISEL-NEXT:    v_cmp_gt_i64_e32 vcc_lo, s[4:5], v[4:5]
 ; GFX1132GISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
 ; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
index 7a83d7fa5ced2..b12537eb0cebe 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
@@ -1269,21 +1269,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-LABEL: divergent_value_i64:
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
 ; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX8DAGISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX8DAGISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX8DAGISEL-NEXT:    s_brev_b32 s5, -2
-; GFX8DAGISEL-NEXT:    s_mov_b32 s4, -1
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX8DAGISEL-NEXT:    s_mov_b32 s4, -1
-; GFX8DAGISEL-NEXT:    s_brev_b32 s5, -2
-; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1306,21 +1294,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL-LABEL: divergent_value_i64:
 ; GFX8GISEL:       ; %bb.0: ; %entry
 ; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX8GISEL-NEXT:    s_mov_b32 s4, -1
-; GFX8GISEL-NEXT:    s_brev_b32 s5, -2
-; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX8GISEL-NEXT:    s_brev_b32 s5, -2
-; GFX8GISEL-NEXT:    s_mov_b32 s4, -1
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX8GISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX8GISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX8GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1343,21 +1319,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL-LABEL: divergent_value_i64:
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
 ; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX9DAGISEL-NEXT:    s_mov_b32 s4, -1
-; GFX9DAGISEL-NEXT:    s_brev_b32 s5, -2
-; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX9DAGISEL-NEXT:    s_brev_b32 s5, -2
-; GFX9DAGISEL-NEXT:    s_mov_b32 s4, -1
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX9DAGISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX9DAGISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1380,21 +1344,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL-LABEL: divergent_value_i64:
 ; GFX9GISEL:       ; %bb.0: ; %entry
 ; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX9GISEL-NEXT:    s_mov_b32 s4, -1
-; GFX9GISEL-NEXT:    s_brev_b32 s5, -2
-; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX9GISEL-NEXT:    s_brev_b32 s5, -2
-; GFX9GISEL-NEXT:    s_mov_b32 s4, -1
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX9GISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX9GISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX9GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1417,21 +1369,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL-LABEL: divergent_value_i64:
 ; GFX1064DAGISEL:       ; %bb.0: ; %entry
 ; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, -1
-; GFX1064DAGISEL-NEXT:    s_brev_b32 s5, -2
-; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1064DAGISEL-NEXT:    s_brev_b32 s5, -2
-; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, -1
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX1064DAGISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1453,21 +1393,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL-LABEL: divergent_value_i64:
 ; GFX1064GISEL:       ; %bb.0: ; %entry
 ; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX1064GISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX1064GISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1064GISEL-NEXT:    s_brev_b32 s5, -2
-; GFX1064GISEL-NEXT:    s_mov_b32 s4, -1
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX1064GISEL-NEXT:    s_mov_b32 s4, -1
-; GFX1064GISEL-NEXT:    s_brev_b32 s5, -2
-; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX1064GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1489,21 +1417,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL-LABEL: divergent_value_i64:
 ; GFX1032DAGISEL:       ; %bb.0: ; %entry
 ; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, -1
-; GFX1032DAGISEL-NEXT:    s_brev_b32 s5, -2
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
-=======
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
-; GFX1032DAGISEL-NEXT:    s_brev_b32 s5, -2
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, -1
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX1032DAGISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1525,21 +1441,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL-LABEL: divergent_value_i64:
 ; GFX1032GISEL:       ; %bb.0: ; %entry
 ; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1032GISEL-NEXT:    s_mov_b32 s4, -1
-; GFX1032GISEL-NEXT:    s_brev_b32 s5, -2
-; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
-=======
-; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
-; GFX1032GISEL-NEXT:    s_brev_b32 s5, -2
-; GFX1032GISEL-NEXT:    s_mov_b32 s4, -1
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX1032GISEL-NEXT:    s_mov_b32 s4, -1
 ; GFX1032GISEL-NEXT:    s_brev_b32 s5, -2
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1561,39 +1465,16 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL-LABEL: divergent_value_i64:
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
 ; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, -1
-; GFX1164DAGISEL-NEXT:    s_brev_b32 s1, -2
-; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
-=======
-; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT:    s_brev_b32 s1, -2
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, -1
 ; GFX1164DAGISEL-NEXT:    s_brev_b32 s1, -2
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s8
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s8
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164DAGISEL-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[4:5]
 ; GFX1164DAGISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
 ; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s8
@@ -1609,39 +1490,16 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL-LABEL: divergent_value_i64:
 ; GFX1164GISEL:       ; %bb.0: ; %entry
 ; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1164GISEL-NEXT:    s_mov_b32 s0, -1
-; GFX1164GISEL-NEXT:    s_brev_b32 s1, -2
-; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
-=======
-; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT:    s_brev_b32 s1, -2
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164GISEL-NEXT:    s_mov_b32 s0, -1
 ; GFX1164GISEL-NEXT:    s_brev_b32 s1, -2
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s8
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s8
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164GISEL-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[4:5]
 ; GFX1164GISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
 ; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s8
@@ -1657,38 +1515,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL-LABEL: divergent_value_i64:
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
 ; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, -1
-; GFX1132DAGISEL-NEXT:    s_brev_b32 s1, -2
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-=======
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT:    s_brev_b32 s1, -2
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, -1
 ; GFX1132DAGISEL-NEXT:    s_brev_b32 s1, -2
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
 ; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132DAGISEL-NEXT:    v_cmp_lt_i64_e32 vcc_lo, s[4:5], v[4:5]
 ; GFX1132DAGISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
 ; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -1703,38 +1538,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-LABEL: divergent_value_i64:
 ; GFX1132GISEL:       ; %bb.0: ; %entry
 ; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1132GISEL-NEXT:    s_mov_b32 s0, -1
-; GFX1132GISEL-NEXT:    s_brev_b32 s1, -2
-; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-=======
-; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT:    s_brev_b32 s1, -2
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132GISEL-NEXT:    s_mov_b32 s0, -1
 ; GFX1132GISEL-NEXT:    s_brev_b32 s1, -2
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132GISEL-NEXT:    v_cmp_lt_i64_e32 vcc_lo, s[4:5], v[4:5]
 ; GFX1132GISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
 ; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.or.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.or.ll
index 1849eaecbe143..99b998b5ed177 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.or.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.or.ll
@@ -1269,9 +1269,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-LABEL: divergent_value_i64:
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
 ; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX8DAGISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
 ; GFX8DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
@@ -1290,9 +1289,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL-LABEL: divergent_value_i64:
 ; GFX8GISEL:       ; %bb.0: ; %entry
 ; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX8GISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8GISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
 ; GFX8GISEL-NEXT:    v_readlane_b32 s8, v2, s10
@@ -1311,9 +1309,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL-LABEL: divergent_value_i64:
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
 ; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX9DAGISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
 ; GFX9DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
@@ -1332,9 +1329,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL-LABEL: divergent_value_i64:
 ; GFX9GISEL:       ; %bb.0: ; %entry
 ; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX9GISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9GISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
 ; GFX9GISEL-NEXT:    v_readlane_b32 s8, v2, s10
@@ -1353,9 +1349,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL-LABEL: divergent_value_i64:
 ; GFX1064DAGISEL:       ; %bb.0: ; %entry
 ; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1064DAGISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
 ; GFX1064DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
@@ -1373,9 +1368,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL-LABEL: divergent_value_i64:
 ; GFX1064GISEL:       ; %bb.0: ; %entry
 ; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1064GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1064GISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
 ; GFX1064GISEL-NEXT:    v_readlane_b32 s8, v2, s10
@@ -1393,9 +1387,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL-LABEL: divergent_value_i64:
 ; GFX1032DAGISEL:       ; %bb.0: ; %entry
 ; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032DAGISEL-NEXT:    v_readlane_b32 s8, v2, s7
@@ -1413,9 +1406,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL-LABEL: divergent_value_i64:
 ; GFX1032GISEL:       ; %bb.0: ; %entry
 ; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1032GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
-; GFX1032GISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032GISEL-NEXT:    v_readlane_b32 s8, v2, s7
@@ -1433,12 +1425,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL-LABEL: divergent_value_i64:
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
 ; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s6, s[2:3]
-; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s6
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s6
 ; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s6
@@ -1454,12 +1445,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL-LABEL: divergent_value_i64:
 ; GFX1164GISEL:       ; %bb.0: ; %entry
 ; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1164GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s6, s[2:3]
-; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s6
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s6
 ; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s6
@@ -1475,12 +1465,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL-LABEL: divergent_value_i64:
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
 ; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
-; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
 ; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -1495,12 +1484,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-LABEL: divergent_value_i64:
 ; GFX1132GISEL:       ; %bb.0: ; %entry
 ; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1132GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
-; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
 ; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
index 88637bcf882a7..bc8bf7f4b3b6f 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
@@ -1629,10 +1629,6 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: uniform_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -1654,11 +1650,6 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
-<<<<<<< HEAD
-=======
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 %in, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -1911,10 +1902,6 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: const_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -1936,11 +1923,6 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
 ; GFX12DAGISEL-NEXT:    s_wait_kmcnt 0x0
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
-<<<<<<< HEAD
-=======
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 123, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -2193,10 +2175,6 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: poison_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -2218,11 +2196,6 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
 ; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
-<<<<<<< HEAD
-=======
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 poison, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -2233,19 +2206,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-LABEL: divergent_value_i64:
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
 ; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX8DAGISEL-NEXT:    s_mov_b32 s4, 0
-; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX8DAGISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
-; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX8DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2265,19 +2227,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL-LABEL: divergent_value_i64:
 ; GFX8GISEL:       ; %bb.0: ; %entry
 ; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX8GISEL-NEXT:    s_mov_b32 s4, 0
-; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX8GISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
 ; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX8GISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2297,19 +2248,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL-LABEL: divergent_value_i64:
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
 ; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX9DAGISEL-NEXT:    s_mov_b32 s4, 0
-; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX9DAGISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
-; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX9DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2329,19 +2269,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL-LABEL: divergent_value_i64:
 ; GFX9GISEL:       ; %bb.0: ; %entry
 ; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX9GISEL-NEXT:    s_mov_b32 s4, 0
-; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX9GISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
 ; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX9GISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2361,19 +2290,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL-LABEL: divergent_value_i64:
 ; GFX1064DAGISEL:       ; %bb.0: ; %entry
 ; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, 0
-; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1064DAGISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
-; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX1064DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2392,19 +2310,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL-LABEL: divergent_value_i64:
 ; GFX1064GISEL:       ; %bb.0: ; %entry
 ; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX1064GISEL-NEXT:    s_mov_b32 s4, 0
-; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1064GISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
 ; GFX1064GISEL-NEXT:    v_readlane_b32 s9, v2, s8
@@ -2423,19 +2330,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL-LABEL: divergent_value_i64:
 ; GFX1032DAGISEL:       ; %bb.0: ; %entry
 ; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
-=======
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, 0
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
-; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032DAGISEL-NEXT:    v_readlane_b32 s8, v2, s7
@@ -2454,19 +2350,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL-LABEL: divergent_value_i64:
 ; GFX1032GISEL:       ; %bb.0: ; %entry
 ; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
-=======
-; GFX1032GISEL-NEXT:    s_mov_b32 s4, 0
-; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
-; GFX1032GISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
-; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032GISEL-NEXT:    v_readlane_b32 s8, v2, s7
@@ -2485,27 +2370,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL-LABEL: divergent_value_i64:
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
 ; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
-; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
-=======
-; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, 0
-=======
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
-<<<<<<< HEAD
-; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v2, s4
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s6, v3, s4
 ; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s4
@@ -2522,27 +2391,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL-LABEL: divergent_value_i64:
 ; GFX1164GISEL:       ; %bb.0: ; %entry
 ; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
-=======
-; GFX1164GISEL-NEXT:    s_mov_b32 s0, 0
-=======
-; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], 0
->>>>>>> 24ec1cb8e95b (Marking dead scc)
-; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
-<<<<<<< HEAD
-; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v2, s4
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s6, v3, s4
 ; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s4
@@ -2559,27 +2412,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL-LABEL: divergent_value_i64:
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
 ; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
-=======
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, 0
-=======
-; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
->>>>>>> 24ec1cb8e95b (Marking dead scc)
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
-<<<<<<< HEAD
-; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
 ; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -2595,27 +2432,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-LABEL: divergent_value_i64:
 ; GFX1132GISEL:       ; %bb.0: ; %entry
 ; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
-=======
-; GFX1132GISEL-NEXT:    s_mov_b32 s0, 0
-=======
-; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], 0
->>>>>>> 24ec1cb8e95b (Marking dead scc)
-; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
-<<<<<<< HEAD
-; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
->>>>>>> 24ec1cb8e95b (Marking dead scc)
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
 ; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -2627,10 +2448,6 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
 ; GFX1132GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX1132GISEL-NEXT:    s_setpc_b64 s[30:31]
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: divergent_value_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -2657,11 +2474,6 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX12DAGISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
 ; GFX12DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX12DAGISEL-NEXT:    s_setpc_b64 s[30:31]
-<<<<<<< HEAD
-=======
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 %id.x, i32 1)
   store i64 %result, ptr addrspace(1) %out
@@ -3244,10 +3056,6 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1132GISEL-NEXT:    s_endpgm
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 ;
 ; GFX12DAGISEL-LABEL: divergent_cfg_i64:
 ; GFX12DAGISEL:       ; %bb.0: ; %entry
@@ -3302,11 +3110,6 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
 ; GFX12DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX12DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX12DAGISEL-NEXT:    s_endpgm
-<<<<<<< HEAD
-=======
->>>>>>> 1332853dbfdf ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 2)
-=======
->>>>>>> 0239a94d07ff (Checking for targets with native 64-bit `add`/`sub` support)
 entry:
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %d_cmp = icmp ult i32 %tid, 16
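
A note for readers skimming the umax checks in the next file: every divergent
i64 case expands to the same wave-level loop -- find the lowest set bit of the
exec mask, v_readlane both 32-bit halves of that lane's value, compare it
against a 64-bit accumulator, s_cselect the winner, clear the bit, and repeat
until the mask is empty, with the accumulator seeded with the operation's
identity (0 for umax). A minimal host-side C++20 sketch of that loop follows;
the function name, container and sample values are illustrative assumptions,
not anything taken from the patch.

// Hypothetical host-side model of the divergent i64 wave-reduction loop seen
// in the GFX8-GFX11 checks below (names and test values are illustrative):
// walk the EXEC mask lane by lane, read that lane's 64-bit value, and fold it
// into an accumulator seeded with the operation's identity.
#include <algorithm>
#include <bit>
#include <cstdint>
#include <cstdio>
#include <vector>

static uint64_t waveReduceUMax64(const std::vector<uint64_t> &LaneValues,
                                 uint64_t ExecMask) {
  uint64_t Acc = 0; // identity for umax, cf. "s_mov_b64 s[4:5], 0"
  while (ExecMask != 0) {
    int Lane = std::countr_zero(ExecMask); // s_ff1_i32_b64 / s_ctz_i32_b64
    uint64_t V = LaneValues[Lane];         // v_readlane_b32 of both halves
    Acc = std::max(Acc, V);                // v_cmp_gt_u64 + s_cselect_b64
    ExecMask &= ExecMask - 1;              // s_bitset0_b64
  }
  return Acc;
}

int main() {
  std::vector<uint64_t> Lanes = {7, uint64_t(1) << 40, 42, 3};
  uint64_t ExecMask = 0b1011; // lanes 0, 1 and 3 are active
  std::printf("%llu\n", (unsigned long long)waveReduceUMax64(Lanes, ExecMask));
  return 0;
}
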
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
index 3fc5bb103873b..1f848d577d2a4 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
@@ -1194,8 +1194,6 @@ entry:
   ret void
 }
 
-<<<<<<< HEAD
-<<<<<<< HEAD
 define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-LABEL: divergent_value_i64:
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
@@ -1476,300 +1474,6 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
 ; GFX1132GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX1132GISEL-NEXT:    s_setpc_b64 s[30:31]
-=======
-define amdgpu_kernel void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
-=======
-define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
-; GFX8DAGISEL-LABEL: divergent_value_i64:
-; GFX8DAGISEL:       ; %bb.0: ; %entry
-; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
-; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
-; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v5, s5
-; GFX8DAGISEL-NEXT:    v_readlane_b32 s8, v2, s12
-; GFX8DAGISEL-NEXT:    v_readlane_b32 s9, v3, s12
-; GFX8DAGISEL-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
-; GFX8DAGISEL-NEXT:    s_and_b64 s[10:11], vcc, s[6:7]
-; GFX8DAGISEL-NEXT:    s_bitset0_b64 s[6:7], s12
-; GFX8DAGISEL-NEXT:    s_cselect_b64 s[4:5], s[8:9], s[4:5]
-; GFX8DAGISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
-; GFX8DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
-; GFX8DAGISEL-NEXT:  ; %bb.2:
-; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
-; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
-; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
-; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0)
-; GFX8DAGISEL-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX8GISEL-LABEL: divergent_value_i64:
-; GFX8GISEL:       ; %bb.0: ; %entry
-; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX8GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
-; GFX8GISEL-NEXT:    v_mov_b32_e32 v4, s4
-; GFX8GISEL-NEXT:    v_mov_b32_e32 v5, s5
-; GFX8GISEL-NEXT:    v_readlane_b32 s8, v2, s12
-; GFX8GISEL-NEXT:    v_readlane_b32 s9, v3, s12
-; GFX8GISEL-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
-; GFX8GISEL-NEXT:    s_and_b64 s[10:11], vcc, s[6:7]
-; GFX8GISEL-NEXT:    s_bitset0_b64 s[6:7], s12
-; GFX8GISEL-NEXT:    s_cselect_b64 s[4:5], s[8:9], s[4:5]
-; GFX8GISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
-; GFX8GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
-; GFX8GISEL-NEXT:  ; %bb.2:
-; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s4
-; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s5
-; GFX8GISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
-; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0)
-; GFX8GISEL-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX9DAGISEL-LABEL: divergent_value_i64:
-; GFX9DAGISEL:       ; %bb.0: ; %entry
-; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
-; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
-; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v5, s5
-; GFX9DAGISEL-NEXT:    v_readlane_b32 s8, v2, s12
-; GFX9DAGISEL-NEXT:    v_readlane_b32 s9, v3, s12
-; GFX9DAGISEL-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
-; GFX9DAGISEL-NEXT:    s_and_b64 s[10:11], vcc, s[6:7]
-; GFX9DAGISEL-NEXT:    s_bitset0_b64 s[6:7], s12
-; GFX9DAGISEL-NEXT:    s_cselect_b64 s[4:5], s[8:9], s[4:5]
-; GFX9DAGISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
-; GFX9DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
-; GFX9DAGISEL-NEXT:  ; %bb.2:
-; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
-; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
-; GFX9DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
-; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0)
-; GFX9DAGISEL-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX9GISEL-LABEL: divergent_value_i64:
-; GFX9GISEL:       ; %bb.0: ; %entry
-; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX9GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
-; GFX9GISEL-NEXT:    v_mov_b32_e32 v4, s4
-; GFX9GISEL-NEXT:    v_mov_b32_e32 v5, s5
-; GFX9GISEL-NEXT:    v_readlane_b32 s8, v2, s12
-; GFX9GISEL-NEXT:    v_readlane_b32 s9, v3, s12
-; GFX9GISEL-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
-; GFX9GISEL-NEXT:    s_and_b64 s[10:11], vcc, s[6:7]
-; GFX9GISEL-NEXT:    s_bitset0_b64 s[6:7], s12
-; GFX9GISEL-NEXT:    s_cselect_b64 s[4:5], s[8:9], s[4:5]
-; GFX9GISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
-; GFX9GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
-; GFX9GISEL-NEXT:  ; %bb.2:
-; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, s4
-; GFX9GISEL-NEXT:    v_mov_b32_e32 v3, s5
-; GFX9GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
-; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0)
-; GFX9GISEL-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX1064DAGISEL-LABEL: divergent_value_i64:
-; GFX1064DAGISEL:       ; %bb.0: ; %entry
-; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
-; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
-; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v5, s5
-; GFX1064DAGISEL-NEXT:    v_readlane_b32 s8, v2, s12
-; GFX1064DAGISEL-NEXT:    v_readlane_b32 s9, v3, s12
-; GFX1064DAGISEL-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
-; GFX1064DAGISEL-NEXT:    s_and_b64 s[10:11], vcc, s[6:7]
-; GFX1064DAGISEL-NEXT:    s_bitset0_b64 s[6:7], s12
-; GFX1064DAGISEL-NEXT:    s_cselect_b64 s[4:5], s[8:9], s[4:5]
-; GFX1064DAGISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
-; GFX1064DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
-; GFX1064DAGISEL-NEXT:  ; %bb.2:
-; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
-; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
-; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
-; GFX1064DAGISEL-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX1064GISEL-LABEL: divergent_value_i64:
-; GFX1064GISEL:       ; %bb.0: ; %entry
-; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
-; GFX1064GISEL-NEXT:    v_mov_b32_e32 v4, s4
-; GFX1064GISEL-NEXT:    v_mov_b32_e32 v5, s5
-; GFX1064GISEL-NEXT:    v_readlane_b32 s8, v2, s12
-; GFX1064GISEL-NEXT:    v_readlane_b32 s9, v3, s12
-; GFX1064GISEL-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
-; GFX1064GISEL-NEXT:    s_and_b64 s[10:11], vcc, s[6:7]
-; GFX1064GISEL-NEXT:    s_bitset0_b64 s[6:7], s12
-; GFX1064GISEL-NEXT:    s_cselect_b64 s[4:5], s[8:9], s[4:5]
-; GFX1064GISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
-; GFX1064GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
-; GFX1064GISEL-NEXT:  ; %bb.2:
-; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, s4
-; GFX1064GISEL-NEXT:    v_mov_b32_e32 v3, s5
-; GFX1064GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
-; GFX1064GISEL-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX1032DAGISEL-LABEL: divergent_value_i64:
-; GFX1032DAGISEL:       ; %bb.0: ; %entry
-; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
-; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
-; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
-; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v5, s5
-; GFX1032DAGISEL-NEXT:    v_readlane_b32 s8, v2, s7
-; GFX1032DAGISEL-NEXT:    v_readlane_b32 s9, v3, s7
-; GFX1032DAGISEL-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[8:9], v[4:5]
-; GFX1032DAGISEL-NEXT:    s_and_b32 s10, vcc_lo, s6
-; GFX1032DAGISEL-NEXT:    s_bitset0_b32 s6, s7
-; GFX1032DAGISEL-NEXT:    s_cselect_b64 s[4:5], s[8:9], s[4:5]
-; GFX1032DAGISEL-NEXT:    s_cmp_lg_u32 s6, 0
-; GFX1032DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
-; GFX1032DAGISEL-NEXT:  ; %bb.2:
-; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
-; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
-; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
-; GFX1032DAGISEL-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX1032GISEL-LABEL: divergent_value_i64:
-; GFX1032GISEL:       ; %bb.0: ; %entry
-; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], 0
-; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
-; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
-; GFX1032GISEL-NEXT:    v_mov_b32_e32 v4, s4
-; GFX1032GISEL-NEXT:    v_mov_b32_e32 v5, s5
-; GFX1032GISEL-NEXT:    v_readlane_b32 s8, v2, s7
-; GFX1032GISEL-NEXT:    v_readlane_b32 s9, v3, s7
-; GFX1032GISEL-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[8:9], v[4:5]
-; GFX1032GISEL-NEXT:    s_and_b32 s10, vcc_lo, s6
-; GFX1032GISEL-NEXT:    s_bitset0_b32 s6, s7
-; GFX1032GISEL-NEXT:    s_cselect_b64 s[4:5], s[8:9], s[4:5]
-; GFX1032GISEL-NEXT:    s_cmp_lg_u32 s6, 0
-; GFX1032GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
-; GFX1032GISEL-NEXT:  ; %bb.2:
-; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, s4
-; GFX1032GISEL-NEXT:    v_mov_b32_e32 v3, s5
-; GFX1032GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
-; GFX1032GISEL-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX1164DAGISEL-LABEL: divergent_value_i64:
-; GFX1164DAGISEL:       ; %bb.0: ; %entry
-; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
-; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
-; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
-; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v4, s0
-; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v5, s1
-; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s8
-; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s8
-; GFX1164DAGISEL-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[4:5]
-; GFX1164DAGISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
-; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s8
-; GFX1164DAGISEL-NEXT:    s_cselect_b64 s[0:1], s[4:5], s[0:1]
-; GFX1164DAGISEL-NEXT:    s_cmp_lg_u64 s[2:3], 0
-; GFX1164DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
-; GFX1164DAGISEL-NEXT:  ; %bb.2:
-; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v3, s1
-; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GFX1164DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
-; GFX1164DAGISEL-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX1164GISEL-LABEL: divergent_value_i64:
-; GFX1164GISEL:       ; %bb.0: ; %entry
-; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], 0
-; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
-; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
-; GFX1164GISEL-NEXT:    v_mov_b32_e32 v4, s0
-; GFX1164GISEL-NEXT:    v_mov_b32_e32 v5, s1
-; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s8
-; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s8
-; GFX1164GISEL-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[4:5]
-; GFX1164GISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
-; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s8
-; GFX1164GISEL-NEXT:    s_cselect_b64 s[0:1], s[4:5], s[0:1]
-; GFX1164GISEL-NEXT:    s_cmp_lg_u64 s[2:3], 0
-; GFX1164GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
-; GFX1164GISEL-NEXT:  ; %bb.2:
-; GFX1164GISEL-NEXT:    v_mov_b32_e32 v3, s1
-; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GFX1164GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
-; GFX1164GISEL-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX1132DAGISEL-LABEL: divergent_value_i64:
-; GFX1132DAGISEL:       ; %bb.0: ; %entry
-; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
-; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
-; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
-; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
-; GFX1132DAGISEL-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[4:5], v[4:5]
-; GFX1132DAGISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
-; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
-; GFX1132DAGISEL-NEXT:    s_cselect_b64 s[0:1], s[4:5], s[0:1]
-; GFX1132DAGISEL-NEXT:    s_cmp_lg_u32 s2, 0
-; GFX1132DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
-; GFX1132DAGISEL-NEXT:  ; %bb.2:
-; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
-; GFX1132DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
-; GFX1132DAGISEL-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX1132GISEL-LABEL: divergent_value_i64:
-; GFX1132GISEL:       ; %bb.0: ; %entry
-<<<<<<< HEAD
-; GFX1132GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
-; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
-; GFX1132GISEL-NEXT:    s_endpgm
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], 0
-; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
-; GFX1132GISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
-; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
-; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
-; GFX1132GISEL-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[4:5], v[4:5]
-; GFX1132GISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
-; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
-; GFX1132GISEL-NEXT:    s_cselect_b64 s[0:1], s[4:5], s[0:1]
-; GFX1132GISEL-NEXT:    s_cmp_lg_u32 s2, 0
-; GFX1132GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
-; GFX1132GISEL-NEXT:  ; %bb.2:
-; GFX1132GISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
-; GFX1132GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
-; GFX1132GISEL-NEXT:    s_setpc_b64 s[30:31]
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 entry:
   %result = call i64 @llvm.amdgcn.wave.reduce.umax.i64(i64 %id.x, i32 1)
   store i64 %result, ptr addrspace(1) %out
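
The umin checks in the next file follow the same loop as the sketch above;
only the seed and the compare direction change: the accumulator starts at
all-ones (s_mov_b64 s[4:5], -1, i.e. 2^64 - 1, the identity for unsigned min)
and the lane value is kept when v_cmp_lt_u64 reports it smaller.
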
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
index 29412bc1ed31d..c2cfb8828c30c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
@@ -1198,19 +1198,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-LABEL: divergent_value_i64:
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
 ; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX8DAGISEL-NEXT:    s_mov_b32 s4, -1
-; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX8DAGISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
-; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1233,19 +1222,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL-LABEL: divergent_value_i64:
 ; GFX8GISEL:       ; %bb.0: ; %entry
 ; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX8GISEL-NEXT:    s_mov_b32 s4, -1
-; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX8GISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], -1
-; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX8GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1268,19 +1246,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL-LABEL: divergent_value_i64:
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
 ; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
-; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX9DAGISEL-NEXT:    s_mov_b32 s4, -1
-; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX9DAGISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1303,19 +1270,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL-LABEL: divergent_value_i64:
 ; GFX9GISEL:       ; %bb.0: ; %entry
 ; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], -1
-; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX9GISEL-NEXT:    s_mov_b32 s4, -1
-; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX9GISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX9GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1338,19 +1294,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL-LABEL: divergent_value_i64:
 ; GFX1064DAGISEL:       ; %bb.0: ; %entry
 ; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
-; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, -1
-; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1064DAGISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1372,19 +1317,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL-LABEL: divergent_value_i64:
 ; GFX1064GISEL:       ; %bb.0: ; %entry
 ; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
-=======
-; GFX1064GISEL-NEXT:    s_mov_b32 s4, -1
-; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1064GISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], -1
-; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s12, s[6:7]
 ; GFX1064GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1406,19 +1340,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL-LABEL: divergent_value_i64:
 ; GFX1032DAGISEL:       ; %bb.0: ; %entry
 ; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
-=======
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, -1
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], -1
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1440,19 +1363,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL-LABEL: divergent_value_i64:
 ; GFX1032GISEL:       ; %bb.0: ; %entry
 ; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], -1
-; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
-=======
-; GFX1032GISEL-NEXT:    s_mov_b32 s4, -1
-; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
-; GFX1032GISEL-NEXT:    s_mov_b32 s5, s4
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], -1
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032GISEL-NEXT:    v_mov_b32_e32 v4, s4
@@ -1474,36 +1386,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL-LABEL: divergent_value_i64:
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
 ; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], -1
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
-=======
-; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, -1
-=======
-; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], -1
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
-; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s8
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s8
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164DAGISEL-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[4:5]
 ; GFX1164DAGISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
 ; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s8
@@ -1519,36 +1410,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL-LABEL: divergent_value_i64:
 ; GFX1164GISEL:       ; %bb.0: ; %entry
 ; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], -1
-; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
-=======
-; GFX1164GISEL-NEXT:    s_mov_b32 s0, -1
-=======
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], -1
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s8, s[2:3]
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX1164GISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s8
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s8
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1164GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1164GISEL-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[4:5]
 ; GFX1164GISEL-NEXT:    s_and_b64 s[6:7], vcc, s[2:3]
 ; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s8
@@ -1564,35 +1434,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL-LABEL: divergent_value_i64:
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
 ; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], -1
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-=======
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, -1
-=======
 ; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], -1
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
 ; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
 ; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132DAGISEL-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[4:5], v[4:5]
 ; GFX1132DAGISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
 ; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -1607,35 +1456,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-LABEL: divergent_value_i64:
 ; GFX1132GISEL:       ; %bb.0: ; %entry
 ; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
 ; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], -1
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-=======
-; GFX1132GISEL-NEXT:    s_mov_b32 s0, -1
-=======
-; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], -1
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
-; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
 ; GFX1132GISEL-NEXT:    v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1132GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
 ; GFX1132GISEL-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[4:5], v[4:5]
 ; GFX1132GISEL-NEXT:    s_and_b32 s6, vcc_lo, s2
 ; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll
index 9c523b2404121..be432c00de728 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll
@@ -1856,9 +1856,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8DAGISEL-LABEL: divergent_value_i64:
 ; GFX8DAGISEL:       ; %bb.0: ; %entry
 ; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX8DAGISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
 ; GFX8DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
@@ -1877,9 +1876,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX8GISEL-LABEL: divergent_value_i64:
 ; GFX8GISEL:       ; %bb.0: ; %entry
 ; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX8GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX8GISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX8GISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
 ; GFX8GISEL-NEXT:    v_readlane_b32 s8, v2, s10
@@ -1898,9 +1896,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9DAGISEL-LABEL: divergent_value_i64:
 ; GFX9DAGISEL:       ; %bb.0: ; %entry
 ; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX9DAGISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
 ; GFX9DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
@@ -1919,9 +1916,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX9GISEL-LABEL: divergent_value_i64:
 ; GFX9GISEL:       ; %bb.0: ; %entry
 ; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX9GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX9GISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX9GISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
 ; GFX9GISEL-NEXT:    v_readlane_b32 s8, v2, s10
@@ -1940,9 +1936,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064DAGISEL-LABEL: divergent_value_i64:
 ; GFX1064DAGISEL:       ; %bb.0: ; %entry
 ; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1064DAGISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
 ; GFX1064DAGISEL-NEXT:    v_readlane_b32 s8, v2, s10
@@ -1960,9 +1955,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1064GISEL-LABEL: divergent_value_i64:
 ; GFX1064GISEL:       ; %bb.0: ; %entry
 ; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1064GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1064GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
-; GFX1064GISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s10, s[6:7]
 ; GFX1064GISEL-NEXT:    v_readlane_b32 s8, v2, s10
@@ -1980,9 +1974,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032DAGISEL-LABEL: divergent_value_i64:
 ; GFX1032DAGISEL:       ; %bb.0: ; %entry
 ; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1032DAGISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
-; GFX1032DAGISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032DAGISEL-NEXT:    v_readlane_b32 s8, v2, s7
@@ -2000,9 +1993,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1032GISEL-LABEL: divergent_value_i64:
 ; GFX1032GISEL:       ; %bb.0: ; %entry
 ; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1032GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1032GISEL-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
-; GFX1032GISEL-NEXT:    s_mov_b32 s5, s4
 ; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
 ; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
 ; GFX1032GISEL-NEXT:    v_readlane_b32 s8, v2, s7
@@ -2020,12 +2012,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164DAGISEL-LABEL: divergent_value_i64:
 ; GFX1164DAGISEL:       ; %bb.0: ; %entry
 ; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s6, s[2:3]
-; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s4, v2, s6
 ; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v3, s6
 ; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s6
@@ -2041,12 +2032,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1164GISEL-LABEL: divergent_value_i64:
 ; GFX1164GISEL:       ; %bb.0: ; %entry
 ; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1164GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1164GISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s6, s[2:3]
-; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s4, v2, s6
 ; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v3, s6
 ; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s6
@@ -2062,12 +2052,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132DAGISEL-LABEL: divergent_value_i64:
 ; GFX1132DAGISEL:       ; %bb.0: ; %entry
 ; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132DAGISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
-; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
 ; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
@@ -2082,12 +2071,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
 ; GFX1132GISEL-LABEL: divergent_value_i64:
 ; GFX1132GISEL:       ; %bb.0: ; %entry
 ; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1132GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132GISEL-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT:    s_mov_b32 s1, s0
 ; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
-; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
 ; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
 ; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
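
For anyone scanning the regenerated checks: each `divergent_value_i64` function above is driven by IR of roughly the shape below. This is a sketch inferred from the existing i32 tests and the function names in the diff, not text taken from the patch; the exact intrinsic mangling (the `.i64` suffix) and the trailing strategy operand are assumptions, and the other reduce.* test files follow the same pattern with their respective intrinsics.

  ; Hypothetical reduced test: wave-wide unsigned-max reduction of a divergent
  ; 64-bit value, with the result stored through a global pointer.
  define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
  entry:
    %reduced = call i64 @llvm.amdgcn.wave.reduce.umax.i64(i64 %id.x, i32 1)
    store i64 %reduced, ptr addrspace(1) %out
    ret void
  }

  declare i64 @llvm.amdgcn.wave.reduce.umax.i64(i64, i32)

Because %id.x is divergent, the checks above take the iterative lowering: a loop over the active lanes in exec that reads each lane's low and high halves with v_readlane_b32 and accumulates them into an SGPR pair.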


