[llvm] [AMDGPU] Extending wave reduction intrinsics for `i64` types - 2 (PR #151309)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 10 04:50:01 PDT 2025
https://github.com/easyonaadit updated https://github.com/llvm/llvm-project/pull/151309
From e90bc02af3d197118b48e0d4ec735908102efcac Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Sat, 19 Jul 2025 12:48:18 +0530
Subject: [PATCH 1/9] [AMDGPU] Extending wave reduction intrinsics for `i64`
types - 1
Supporting Min/Max Operations: `min`, `max`, `umin`, `umax`
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 12 +-
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll | 92 ++++++++++++++++
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll | 92 ++++++++++++++++
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll | 103 ++++++++++++++++++
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll | 92 ++++++++++++++++
5 files changed, 390 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 9c98f453fe331..c3f7cad12c6a6 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5273,12 +5273,16 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
static uint32_t getIdentityValueFor32BitWaveReduction(unsigned Opc) {
switch (Opc) {
case AMDGPU::S_MIN_U32:
+ case AMDGPU::V_CMP_LT_U64_e64: // umin.u64
return std::numeric_limits<uint32_t>::max();
case AMDGPU::S_MIN_I32:
+ case AMDGPU::V_CMP_LT_I64_e64: // min.i64
return std::numeric_limits<int32_t>::max();
case AMDGPU::S_MAX_U32:
+ case AMDGPU::V_CMP_GT_U64_e64: // umax.u64
return std::numeric_limits<uint32_t>::min();
case AMDGPU::S_MAX_I32:
+ case AMDGPU::V_CMP_GT_I64_e64: // max.i64
return std::numeric_limits<int32_t>::min();
case AMDGPU::S_ADD_I32:
case AMDGPU::S_SUB_I32:
@@ -5331,16 +5335,22 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
bool isSGPR = TRI->isSGPRClass(MRI.getRegClass(SrcReg));
Register DstReg = MI.getOperand(0).getReg();
MachineBasicBlock *RetBB = nullptr;
+ bool is32BitOpc = TRI->getRegSizeInBits(*MRI.getRegClass(DstReg)) == 32;
if (isSGPR) {
switch (Opc) {
case AMDGPU::S_MIN_U32:
+ case AMDGPU::V_CMP_LT_U64_e64: /*umin*/
case AMDGPU::S_MIN_I32:
+ case AMDGPU::V_CMP_LT_I64_e64: /*min*/
case AMDGPU::S_MAX_U32:
+ case AMDGPU::V_CMP_GT_U64_e64: /*umax*/
case AMDGPU::S_MAX_I32:
+ case AMDGPU::V_CMP_GT_I64_e64: /*max*/
case AMDGPU::S_AND_B32:
case AMDGPU::S_OR_B32: {
// Idempotent operations.
- BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B32), DstReg).addReg(SrcReg);
+ unsigned movOpc = is32BitOpc ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
+ BuildMI(BB, MI, DL, TII->get(movOpc), DstReg).addReg(SrcReg);
RetBB = &BB;
break;
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
index ace65a03a5abb..ae727d4f3f713 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
@@ -1269,9 +1269,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8DAGISEL-LABEL: divergent_value_i64:
; GFX8DAGISEL: ; %bb.0: ; %entry
; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8DAGISEL-NEXT: s_mov_b32 s4, 0
-; GFX8DAGISEL-NEXT: s_brev_b32 s5, 1
-; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_brev_b32 s5, 1
+; GFX8DAGISEL-NEXT: s_mov_b32 s4, 0
; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1294,9 +1300,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8GISEL-LABEL: divergent_value_i64:
; GFX8GISEL: ; %bb.0: ; %entry
; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8GISEL-NEXT: s_mov_b32 s4, 0
-; GFX8GISEL-NEXT: s_brev_b32 s5, 1
-; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: s_brev_b32 s5, 1
+; GFX8GISEL-NEXT: s_mov_b32 s4, 0
; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX8GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1319,9 +1331,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9DAGISEL-LABEL: divergent_value_i64:
; GFX9DAGISEL: ; %bb.0: ; %entry
; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9DAGISEL-NEXT: s_mov_b32 s4, 0
-; GFX9DAGISEL-NEXT: s_brev_b32 s5, 1
-; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT: s_brev_b32 s5, 1
+; GFX9DAGISEL-NEXT: s_mov_b32 s4, 0
; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1344,9 +1362,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9GISEL-LABEL: divergent_value_i64:
; GFX9GISEL: ; %bb.0: ; %entry
; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9GISEL-NEXT: s_mov_b32 s4, 0
-; GFX9GISEL-NEXT: s_brev_b32 s5, 1
-; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT: s_brev_b32 s5, 1
+; GFX9GISEL-NEXT: s_mov_b32 s4, 0
; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX9GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1369,9 +1393,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064DAGISEL-LABEL: divergent_value_i64:
; GFX1064DAGISEL: ; %bb.0: ; %entry
; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1064DAGISEL-NEXT: s_mov_b32 s4, 0
-; GFX1064DAGISEL-NEXT: s_brev_b32 s5, 1
-; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT: s_brev_b32 s5, 1
+; GFX1064DAGISEL-NEXT: s_mov_b32 s4, 0
; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1393,9 +1423,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064GISEL-LABEL: divergent_value_i64:
; GFX1064GISEL: ; %bb.0: ; %entry
; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1064GISEL-NEXT: s_mov_b32 s4, 0
-; GFX1064GISEL-NEXT: s_brev_b32 s5, 1
-; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT: s_brev_b32 s5, 1
+; GFX1064GISEL-NEXT: s_mov_b32 s4, 0
; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX1064GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1417,9 +1453,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032DAGISEL-LABEL: divergent_value_i64:
; GFX1032DAGISEL: ; %bb.0: ; %entry
; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1032DAGISEL-NEXT: s_mov_b32 s4, 0
-; GFX1032DAGISEL-NEXT: s_brev_b32 s5, 1
-; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT: s_brev_b32 s5, 1
+; GFX1032DAGISEL-NEXT: s_mov_b32 s4, 0
; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1441,9 +1483,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032GISEL-LABEL: divergent_value_i64:
; GFX1032GISEL: ; %bb.0: ; %entry
; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1032GISEL-NEXT: s_mov_b32 s4, 0
-; GFX1032GISEL-NEXT: s_brev_b32 s5, 1
-; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT: s_brev_b32 s5, 1
+; GFX1032GISEL-NEXT: s_mov_b32 s4, 0
; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1465,16 +1513,16 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164DAGISEL-LABEL: divergent_value_i64:
; GFX1164DAGISEL: ; %bb.0: ; %entry
; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1164DAGISEL-NEXT: s_mov_b32 s0, 0
-; GFX1164DAGISEL-NEXT: s_brev_b32 s1, 1
-; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT: s_brev_b32 s1, 1
+; GFX1164DAGISEL-NEXT: s_mov_b32 s0, 0
+; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v4, s0
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v5, s1
; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s8
; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v3, s8
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164DAGISEL-NEXT: v_cmp_gt_i64_e32 vcc, s[4:5], v[4:5]
; GFX1164DAGISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s8
@@ -1490,16 +1549,16 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164GISEL-LABEL: divergent_value_i64:
; GFX1164GISEL: ; %bb.0: ; %entry
; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1164GISEL-NEXT: s_mov_b32 s0, 0
-; GFX1164GISEL-NEXT: s_brev_b32 s1, 1
-; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT: s_brev_b32 s1, 1
+; GFX1164GISEL-NEXT: s_mov_b32 s0, 0
+; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1164GISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164GISEL-NEXT: v_mov_b32_e32 v4, s0
; GFX1164GISEL-NEXT: v_mov_b32_e32 v5, s1
; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s8
; GFX1164GISEL-NEXT: v_readlane_b32 s5, v3, s8
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164GISEL-NEXT: v_cmp_gt_i64_e32 vcc, s[4:5], v[4:5]
; GFX1164GISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s8
@@ -1515,15 +1585,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132DAGISEL-LABEL: divergent_value_i64:
; GFX1132DAGISEL: ; %bb.0: ; %entry
; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1132DAGISEL-NEXT: s_mov_b32 s0, 0
-; GFX1132DAGISEL-NEXT: s_brev_b32 s1, 1
-; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT: s_brev_b32 s1, 1
+; GFX1132DAGISEL-NEXT: s_mov_b32 s0, 0
+; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[4:5], v[4:5]
; GFX1132DAGISEL-NEXT: s_and_b32 s6, vcc_lo, s2
; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
@@ -1538,15 +1619,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132GISEL-LABEL: divergent_value_i64:
; GFX1132GISEL: ; %bb.0: ; %entry
; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1132GISEL-NEXT: s_mov_b32 s0, 0
-; GFX1132GISEL-NEXT: s_brev_b32 s1, 1
-; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT: s_brev_b32 s1, 1
+; GFX1132GISEL-NEXT: s_mov_b32 s0, 0
+; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132GISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132GISEL-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[4:5], v[4:5]
; GFX1132GISEL-NEXT: s_and_b32 s6, vcc_lo, s2
; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
index b12537eb0cebe..ebfc9e433a3d8 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
@@ -1269,9 +1269,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8DAGISEL-LABEL: divergent_value_i64:
; GFX8DAGISEL: ; %bb.0: ; %entry
; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8DAGISEL-NEXT: s_mov_b32 s4, -1
-; GFX8DAGISEL-NEXT: s_brev_b32 s5, -2
-; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_brev_b32 s5, -2
+; GFX8DAGISEL-NEXT: s_mov_b32 s4, -1
; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1294,9 +1300,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8GISEL-LABEL: divergent_value_i64:
; GFX8GISEL: ; %bb.0: ; %entry
; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8GISEL-NEXT: s_mov_b32 s4, -1
-; GFX8GISEL-NEXT: s_brev_b32 s5, -2
-; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: s_brev_b32 s5, -2
+; GFX8GISEL-NEXT: s_mov_b32 s4, -1
; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX8GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1319,9 +1331,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9DAGISEL-LABEL: divergent_value_i64:
; GFX9DAGISEL: ; %bb.0: ; %entry
; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9DAGISEL-NEXT: s_mov_b32 s4, -1
-; GFX9DAGISEL-NEXT: s_brev_b32 s5, -2
-; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT: s_brev_b32 s5, -2
+; GFX9DAGISEL-NEXT: s_mov_b32 s4, -1
; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1344,9 +1362,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9GISEL-LABEL: divergent_value_i64:
; GFX9GISEL: ; %bb.0: ; %entry
; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9GISEL-NEXT: s_mov_b32 s4, -1
-; GFX9GISEL-NEXT: s_brev_b32 s5, -2
-; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT: s_brev_b32 s5, -2
+; GFX9GISEL-NEXT: s_mov_b32 s4, -1
; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX9GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1369,9 +1393,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064DAGISEL-LABEL: divergent_value_i64:
; GFX1064DAGISEL: ; %bb.0: ; %entry
; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1064DAGISEL-NEXT: s_mov_b32 s4, -1
-; GFX1064DAGISEL-NEXT: s_brev_b32 s5, -2
-; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT: s_brev_b32 s5, -2
+; GFX1064DAGISEL-NEXT: s_mov_b32 s4, -1
; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1393,9 +1423,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064GISEL-LABEL: divergent_value_i64:
; GFX1064GISEL: ; %bb.0: ; %entry
; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1064GISEL-NEXT: s_mov_b32 s4, -1
-; GFX1064GISEL-NEXT: s_brev_b32 s5, -2
-; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT: s_brev_b32 s5, -2
+; GFX1064GISEL-NEXT: s_mov_b32 s4, -1
; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX1064GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1417,9 +1453,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032DAGISEL-LABEL: divergent_value_i64:
; GFX1032DAGISEL: ; %bb.0: ; %entry
; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1032DAGISEL-NEXT: s_mov_b32 s4, -1
-; GFX1032DAGISEL-NEXT: s_brev_b32 s5, -2
-; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT: s_brev_b32 s5, -2
+; GFX1032DAGISEL-NEXT: s_mov_b32 s4, -1
; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1441,9 +1483,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032GISEL-LABEL: divergent_value_i64:
; GFX1032GISEL: ; %bb.0: ; %entry
; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1032GISEL-NEXT: s_mov_b32 s4, -1
-; GFX1032GISEL-NEXT: s_brev_b32 s5, -2
-; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT: s_brev_b32 s5, -2
+; GFX1032GISEL-NEXT: s_mov_b32 s4, -1
; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1465,16 +1513,16 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164DAGISEL-LABEL: divergent_value_i64:
; GFX1164DAGISEL: ; %bb.0: ; %entry
; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1164DAGISEL-NEXT: s_mov_b32 s0, -1
-; GFX1164DAGISEL-NEXT: s_brev_b32 s1, -2
-; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT: s_brev_b32 s1, -2
+; GFX1164DAGISEL-NEXT: s_mov_b32 s0, -1
+; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v4, s0
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v5, s1
; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s8
; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v3, s8
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164DAGISEL-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[4:5]
; GFX1164DAGISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s8
@@ -1490,16 +1549,16 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164GISEL-LABEL: divergent_value_i64:
; GFX1164GISEL: ; %bb.0: ; %entry
; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1164GISEL-NEXT: s_mov_b32 s0, -1
-; GFX1164GISEL-NEXT: s_brev_b32 s1, -2
-; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT: s_brev_b32 s1, -2
+; GFX1164GISEL-NEXT: s_mov_b32 s0, -1
+; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1164GISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164GISEL-NEXT: v_mov_b32_e32 v4, s0
; GFX1164GISEL-NEXT: v_mov_b32_e32 v5, s1
; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s8
; GFX1164GISEL-NEXT: v_readlane_b32 s5, v3, s8
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164GISEL-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[4:5]
; GFX1164GISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s8
@@ -1515,15 +1585,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132DAGISEL-LABEL: divergent_value_i64:
; GFX1132DAGISEL: ; %bb.0: ; %entry
; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1132DAGISEL-NEXT: s_mov_b32 s0, -1
-; GFX1132DAGISEL-NEXT: s_brev_b32 s1, -2
-; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT: s_brev_b32 s1, -2
+; GFX1132DAGISEL-NEXT: s_mov_b32 s0, -1
+; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_cmp_lt_i64_e32 vcc_lo, s[4:5], v[4:5]
; GFX1132DAGISEL-NEXT: s_and_b32 s6, vcc_lo, s2
; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
@@ -1538,15 +1619,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132GISEL-LABEL: divergent_value_i64:
; GFX1132GISEL: ; %bb.0: ; %entry
; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1132GISEL-NEXT: s_mov_b32 s0, -1
-; GFX1132GISEL-NEXT: s_brev_b32 s1, -2
-; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT: s_brev_b32 s1, -2
+; GFX1132GISEL-NEXT: s_mov_b32 s0, -1
+; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132GISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132GISEL-NEXT: v_cmp_lt_i64_e32 vcc_lo, s[4:5], v[4:5]
; GFX1132GISEL-NEXT: s_and_b32 s6, vcc_lo, s2
; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
index 1f848d577d2a4..2bcd0c2232a1b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
@@ -1194,6 +1194,6 @@ entry:
ret void
}
define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8DAGISEL-LABEL: divergent_value_i64:
; GFX8DAGISEL: ; %bb.0: ; %entry
@@ -1474,6 +1475,6 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132GISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX1132GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX1132GISEL-NEXT: s_setpc_b64 s[30:31]
entry:
%result = call i64 @llvm.amdgcn.wave.reduce.umax.i64(i64 %id.x, i32 1)
store i64 %result, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
index c2cfb8828c30c..e641eeec4353c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
@@ -1198,8 +1198,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8DAGISEL-LABEL: divergent_value_i64:
; GFX8DAGISEL: ; %bb.0: ; %entry
; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8DAGISEL-NEXT: s_mov_b64 s[4:5], -1
-; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_mov_b32 s5, s4
; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1222,8 +1228,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8GISEL-LABEL: divergent_value_i64:
; GFX8GISEL: ; %bb.0: ; %entry
; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
; GFX8GISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+=======
+; GFX8GISEL-NEXT: s_mov_b32 s4, -1
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: s_mov_b32 s5, s4
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX8GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1246,8 +1258,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9DAGISEL-LABEL: divergent_value_i64:
; GFX9DAGISEL: ; %bb.0: ; %entry
; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+=======
+; GFX9DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT: s_mov_b32 s5, s4
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1270,8 +1288,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9GISEL-LABEL: divergent_value_i64:
; GFX9GISEL: ; %bb.0: ; %entry
; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
; GFX9GISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+=======
+; GFX9GISEL-NEXT: s_mov_b32 s4, -1
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT: s_mov_b32 s5, s4
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX9GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1294,8 +1318,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064DAGISEL-LABEL: divergent_value_i64:
; GFX1064DAGISEL: ; %bb.0: ; %entry
; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+=======
+; GFX1064DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT: s_mov_b32 s5, s4
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1317,8 +1347,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064GISEL-LABEL: divergent_value_i64:
; GFX1064GISEL: ; %bb.0: ; %entry
; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+=======
+; GFX1064GISEL-NEXT: s_mov_b32 s4, -1
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT: s_mov_b32 s5, s4
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX1064GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1340,8 +1376,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032DAGISEL-LABEL: divergent_value_i64:
; GFX1032DAGISEL: ; %bb.0: ; %entry
; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
; GFX1032DAGISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+=======
+; GFX1032DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT: s_mov_b32 s5, s4
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1363,8 +1405,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032GISEL-LABEL: divergent_value_i64:
; GFX1032GISEL: ; %bb.0: ; %entry
; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
; GFX1032GISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+=======
+; GFX1032GISEL-NEXT: s_mov_b32 s4, -1
+; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT: s_mov_b32 s5, s4
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1386,15 +1434,26 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164DAGISEL-LABEL: divergent_value_i64:
; GFX1164DAGISEL: ; %bb.0: ; %entry
; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
; GFX1164DAGISEL-NEXT: s_mov_b64 s[0:1], -1
; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+=======
+; GFX1164DAGISEL-NEXT: s_mov_b32 s0, -1
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT: s_mov_b32 s1, s0
+; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v4, s0
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v5, s1
; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s8
; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v3, s8
+<<<<<<< HEAD
+=======
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
; GFX1164DAGISEL-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[4:5]
; GFX1164DAGISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s8
@@ -1410,15 +1469,26 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164GISEL-LABEL: divergent_value_i64:
; GFX1164GISEL: ; %bb.0: ; %entry
; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
; GFX1164GISEL-NEXT: s_mov_b64 s[0:1], -1
; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+=======
+; GFX1164GISEL-NEXT: s_mov_b32 s0, -1
+; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT: s_mov_b32 s1, s0
+; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
; GFX1164GISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164GISEL-NEXT: v_mov_b32_e32 v4, s0
; GFX1164GISEL-NEXT: v_mov_b32_e32 v5, s1
; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s8
; GFX1164GISEL-NEXT: v_readlane_b32 s5, v3, s8
+<<<<<<< HEAD
+=======
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
; GFX1164GISEL-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[4:5]
; GFX1164GISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s8
@@ -1434,14 +1504,25 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132DAGISEL-LABEL: divergent_value_i64:
; GFX1132DAGISEL: ; %bb.0: ; %entry
; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
; GFX1132DAGISEL-NEXT: s_mov_b64 s[0:1], -1
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+=======
+; GFX1132DAGISEL-NEXT: s_mov_b32 s0, -1
+; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT: s_mov_b32 s1, s0
+; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
+<<<<<<< HEAD
+=======
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
; GFX1132DAGISEL-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[4:5], v[4:5]
; GFX1132DAGISEL-NEXT: s_and_b32 s6, vcc_lo, s2
; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
@@ -1456,14 +1537,25 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132GISEL-LABEL: divergent_value_i64:
; GFX1132GISEL: ; %bb.0: ; %entry
; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+<<<<<<< HEAD
; GFX1132GISEL-NEXT: s_mov_b64 s[0:1], -1
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+=======
+; GFX1132GISEL-NEXT: s_mov_b32 s0, -1
+; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT: s_mov_b32 s1, s0
+; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132GISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
+<<<<<<< HEAD
+=======
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
; GFX1132GISEL-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[4:5], v[4:5]
; GFX1132GISEL-NEXT: s_and_b32 s6, vcc_lo, s2
; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
>From b085d4be95a625a3247f3b3d65663b425d07fa94 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Wed, 13 Aug 2025 11:28:13 +0530
Subject: [PATCH 2/9] Addressing Review Comments
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index c3f7cad12c6a6..d99e8c28f6d85 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5339,18 +5339,22 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
if (isSGPR) {
switch (Opc) {
case AMDGPU::S_MIN_U32:
- case AMDGPU::V_CMP_LT_U64_e64: /*umin*/
case AMDGPU::S_MIN_I32:
- case AMDGPU::V_CMP_LT_I64_e64: /*min*/
case AMDGPU::S_MAX_U32:
- case AMDGPU::V_CMP_GT_U64_e64: /*umax*/
case AMDGPU::S_MAX_I32:
- case AMDGPU::V_CMP_GT_I64_e64: /*max*/
case AMDGPU::S_AND_B32:
case AMDGPU::S_OR_B32: {
// Idempotent operations.
- unsigned movOpc = is32BitOpc ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
- BuildMI(BB, MI, DL, TII->get(movOpc), DstReg).addReg(SrcReg);
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B32), DstReg).addReg(SrcReg);
+ RetBB = &BB;
+ break;
+ }
+ case AMDGPU::V_CMP_LT_U64_e64: // umin
+ case AMDGPU::V_CMP_LT_I64_e64: // min
+ case AMDGPU::V_CMP_GT_U64_e64: // umax
+ case AMDGPU::V_CMP_GT_I64_e64: { // max
+ // Idempotent operations.
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B64), DstReg).addReg(SrcReg);
RetBB = &BB;
break;
}
>From 7dffd7391d2bd1f2916330ee793c318a723dfc9c Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Wed, 20 Aug 2025 11:56:22 +0530
Subject: [PATCH 3/9] Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with
legality concerns.
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 11 +-
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll | 96 +++++
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll | 96 +++++
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll | 327 ++++++++++++++----
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll | 88 ++++-
5 files changed, 539 insertions(+), 79 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index d99e8c28f6d85..dc9af5393bba7 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5273,16 +5273,12 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
static uint32_t getIdentityValueFor32BitWaveReduction(unsigned Opc) {
switch (Opc) {
case AMDGPU::S_MIN_U32:
- case AMDGPU::V_CMP_LT_U64_e64: // umin.u64
return std::numeric_limits<uint32_t>::max();
case AMDGPU::S_MIN_I32:
- case AMDGPU::V_CMP_LT_I64_e64: // min.i64
return std::numeric_limits<int32_t>::max();
case AMDGPU::S_MAX_U32:
- case AMDGPU::V_CMP_GT_U64_e64: // umax.u64
return std::numeric_limits<uint32_t>::min();
case AMDGPU::S_MAX_I32:
- case AMDGPU::V_CMP_GT_I64_e64: // max.i64
return std::numeric_limits<int32_t>::min();
case AMDGPU::S_ADD_I32:
case AMDGPU::S_SUB_I32:
@@ -5335,7 +5331,6 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
bool isSGPR = TRI->isSGPRClass(MRI.getRegClass(SrcReg));
Register DstReg = MI.getOperand(0).getReg();
MachineBasicBlock *RetBB = nullptr;
- bool is32BitOpc = TRI->getRegSizeInBits(*MRI.getRegClass(DstReg)) == 32;
if (isSGPR) {
switch (Opc) {
case AMDGPU::S_MIN_U32:
@@ -5349,9 +5344,9 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
RetBB = &BB;
break;
}
- case AMDGPU::V_CMP_LT_U64_e64: // umin
- case AMDGPU::V_CMP_LT_I64_e64: // min
- case AMDGPU::V_CMP_GT_U64_e64: // umax
+ case AMDGPU::V_CMP_LT_U64_e64: // umin
+ case AMDGPU::V_CMP_LT_I64_e64: // min
+ case AMDGPU::V_CMP_GT_U64_e64: // umax
case AMDGPU::V_CMP_GT_I64_e64: { // max
// Idempotent operations.
BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B64), DstReg).addReg(SrcReg);
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
index ae727d4f3f713..f381a82660491 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
@@ -1270,6 +1270,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8DAGISEL: ; %bb.0: ; %entry
; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX8DAGISEL-NEXT: s_mov_b32 s4, 0
; GFX8DAGISEL-NEXT: s_brev_b32 s5, 1
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
@@ -1278,6 +1279,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8DAGISEL-NEXT: s_brev_b32 s5, 1
; GFX8DAGISEL-NEXT: s_mov_b32 s4, 0
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX8DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX8DAGISEL-NEXT: s_brev_b32 s5, 1
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1301,6 +1307,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8GISEL: ; %bb.0: ; %entry
; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX8GISEL-NEXT: s_mov_b32 s4, 0
; GFX8GISEL-NEXT: s_brev_b32 s5, 1
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
@@ -1309,6 +1316,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8GISEL-NEXT: s_brev_b32 s5, 1
; GFX8GISEL-NEXT: s_mov_b32 s4, 0
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX8GISEL-NEXT: s_mov_b32 s4, 0
+; GFX8GISEL-NEXT: s_brev_b32 s5, 1
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX8GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1332,6 +1344,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9DAGISEL: ; %bb.0: ; %entry
; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX9DAGISEL-NEXT: s_mov_b32 s4, 0
; GFX9DAGISEL-NEXT: s_brev_b32 s5, 1
; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
@@ -1340,6 +1353,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9DAGISEL-NEXT: s_brev_b32 s5, 1
; GFX9DAGISEL-NEXT: s_mov_b32 s4, 0
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX9DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX9DAGISEL-NEXT: s_brev_b32 s5, 1
+; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1363,6 +1381,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9GISEL: ; %bb.0: ; %entry
; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX9GISEL-NEXT: s_mov_b32 s4, 0
; GFX9GISEL-NEXT: s_brev_b32 s5, 1
; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
@@ -1371,6 +1390,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9GISEL-NEXT: s_brev_b32 s5, 1
; GFX9GISEL-NEXT: s_mov_b32 s4, 0
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX9GISEL-NEXT: s_mov_b32 s4, 0
+; GFX9GISEL-NEXT: s_brev_b32 s5, 1
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX9GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1394,6 +1418,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064DAGISEL: ; %bb.0: ; %entry
; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1064DAGISEL-NEXT: s_mov_b32 s4, 0
; GFX1064DAGISEL-NEXT: s_brev_b32 s5, 1
; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
@@ -1402,6 +1427,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064DAGISEL-NEXT: s_brev_b32 s5, 1
; GFX1064DAGISEL-NEXT: s_mov_b32 s4, 0
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1064DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX1064DAGISEL-NEXT: s_brev_b32 s5, 1
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1424,6 +1454,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064GISEL: ; %bb.0: ; %entry
; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1064GISEL-NEXT: s_mov_b32 s4, 0
; GFX1064GISEL-NEXT: s_brev_b32 s5, 1
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
@@ -1432,6 +1463,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064GISEL-NEXT: s_brev_b32 s5, 1
; GFX1064GISEL-NEXT: s_mov_b32 s4, 0
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1064GISEL-NEXT: s_mov_b32 s4, 0
+; GFX1064GISEL-NEXT: s_brev_b32 s5, 1
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX1064GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1454,6 +1490,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032DAGISEL: ; %bb.0: ; %entry
; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1032DAGISEL-NEXT: s_mov_b32 s4, 0
; GFX1032DAGISEL-NEXT: s_brev_b32 s5, 1
; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
@@ -1462,6 +1499,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032DAGISEL-NEXT: s_brev_b32 s5, 1
; GFX1032DAGISEL-NEXT: s_mov_b32 s4, 0
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1032DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX1032DAGISEL-NEXT: s_brev_b32 s5, 1
+; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1484,6 +1526,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032GISEL: ; %bb.0: ; %entry
; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1032GISEL-NEXT: s_mov_b32 s4, 0
; GFX1032GISEL-NEXT: s_brev_b32 s5, 1
; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
@@ -1492,6 +1535,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032GISEL-NEXT: s_brev_b32 s5, 1
; GFX1032GISEL-NEXT: s_mov_b32 s4, 0
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1032GISEL-NEXT: s_mov_b32 s4, 0
+; GFX1032GISEL-NEXT: s_brev_b32 s5, 1
+; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1514,6 +1562,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164DAGISEL: ; %bb.0: ; %entry
; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1164DAGISEL-NEXT: s_mov_b32 s0, 0
; GFX1164DAGISEL-NEXT: s_brev_b32 s1, 1
; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
@@ -1522,18 +1571,29 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
=======
; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164DAGISEL-NEXT: s_brev_b32 s1, 1
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164DAGISEL-NEXT: s_mov_b32 s0, 0
+; GFX1164DAGISEL-NEXT: s_brev_b32 s1, 1
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v4, s0
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v5, s1
; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s8
; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v3, s8
<<<<<<< HEAD
+<<<<<<< HEAD
=======
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164DAGISEL-NEXT: v_cmp_gt_i64_e32 vcc, s[4:5], v[4:5]
; GFX1164DAGISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s8
@@ -1550,6 +1610,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164GISEL: ; %bb.0: ; %entry
; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1164GISEL-NEXT: s_mov_b32 s0, 0
; GFX1164GISEL-NEXT: s_brev_b32 s1, 1
; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
@@ -1558,18 +1619,29 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
=======
; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164GISEL-NEXT: s_brev_b32 s1, 1
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164GISEL-NEXT: s_mov_b32 s0, 0
+; GFX1164GISEL-NEXT: s_brev_b32 s1, 1
+; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164GISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164GISEL-NEXT: v_mov_b32_e32 v4, s0
; GFX1164GISEL-NEXT: v_mov_b32_e32 v5, s1
; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s8
; GFX1164GISEL-NEXT: v_readlane_b32 s5, v3, s8
<<<<<<< HEAD
+<<<<<<< HEAD
=======
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164GISEL-NEXT: v_cmp_gt_i64_e32 vcc, s[4:5], v[4:5]
; GFX1164GISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s8
@@ -1586,6 +1658,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132DAGISEL: ; %bb.0: ; %entry
; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1132DAGISEL-NEXT: s_mov_b32 s0, 0
; GFX1132DAGISEL-NEXT: s_brev_b32 s1, 1
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
@@ -1594,17 +1667,28 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
=======
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132DAGISEL-NEXT: s_brev_b32 s1, 1
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132DAGISEL-NEXT: s_mov_b32 s0, 0
+; GFX1132DAGISEL-NEXT: s_brev_b32 s1, 1
+; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
<<<<<<< HEAD
+<<<<<<< HEAD
=======
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132DAGISEL-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[4:5], v[4:5]
; GFX1132DAGISEL-NEXT: s_and_b32 s6, vcc_lo, s2
; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
@@ -1620,6 +1704,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132GISEL: ; %bb.0: ; %entry
; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1132GISEL-NEXT: s_mov_b32 s0, 0
; GFX1132GISEL-NEXT: s_brev_b32 s1, 1
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
@@ -1628,17 +1713,28 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
=======
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132GISEL-NEXT: s_brev_b32 s1, 1
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132GISEL-NEXT: s_mov_b32 s0, 0
+; GFX1132GISEL-NEXT: s_brev_b32 s1, 1
+; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132GISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
<<<<<<< HEAD
+<<<<<<< HEAD
=======
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132GISEL-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[4:5], v[4:5]
; GFX1132GISEL-NEXT: s_and_b32 s6, vcc_lo, s2
; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
index ebfc9e433a3d8..7a83d7fa5ced2 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
@@ -1270,6 +1270,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8DAGISEL: ; %bb.0: ; %entry
; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX8DAGISEL-NEXT: s_mov_b32 s4, -1
; GFX8DAGISEL-NEXT: s_brev_b32 s5, -2
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
@@ -1278,6 +1279,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8DAGISEL-NEXT: s_brev_b32 s5, -2
; GFX8DAGISEL-NEXT: s_mov_b32 s4, -1
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX8DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX8DAGISEL-NEXT: s_brev_b32 s5, -2
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1301,6 +1307,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8GISEL: ; %bb.0: ; %entry
; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX8GISEL-NEXT: s_mov_b32 s4, -1
; GFX8GISEL-NEXT: s_brev_b32 s5, -2
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
@@ -1309,6 +1316,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8GISEL-NEXT: s_brev_b32 s5, -2
; GFX8GISEL-NEXT: s_mov_b32 s4, -1
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX8GISEL-NEXT: s_mov_b32 s4, -1
+; GFX8GISEL-NEXT: s_brev_b32 s5, -2
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX8GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1332,6 +1344,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9DAGISEL: ; %bb.0: ; %entry
; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX9DAGISEL-NEXT: s_mov_b32 s4, -1
; GFX9DAGISEL-NEXT: s_brev_b32 s5, -2
; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
@@ -1340,6 +1353,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9DAGISEL-NEXT: s_brev_b32 s5, -2
; GFX9DAGISEL-NEXT: s_mov_b32 s4, -1
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX9DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX9DAGISEL-NEXT: s_brev_b32 s5, -2
+; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1363,6 +1381,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9GISEL: ; %bb.0: ; %entry
; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX9GISEL-NEXT: s_mov_b32 s4, -1
; GFX9GISEL-NEXT: s_brev_b32 s5, -2
; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
@@ -1371,6 +1390,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9GISEL-NEXT: s_brev_b32 s5, -2
; GFX9GISEL-NEXT: s_mov_b32 s4, -1
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX9GISEL-NEXT: s_mov_b32 s4, -1
+; GFX9GISEL-NEXT: s_brev_b32 s5, -2
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX9GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1394,6 +1418,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064DAGISEL: ; %bb.0: ; %entry
; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1064DAGISEL-NEXT: s_mov_b32 s4, -1
; GFX1064DAGISEL-NEXT: s_brev_b32 s5, -2
; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
@@ -1402,6 +1427,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064DAGISEL-NEXT: s_brev_b32 s5, -2
; GFX1064DAGISEL-NEXT: s_mov_b32 s4, -1
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1064DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX1064DAGISEL-NEXT: s_brev_b32 s5, -2
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1424,6 +1454,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064GISEL: ; %bb.0: ; %entry
; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1064GISEL-NEXT: s_mov_b32 s4, -1
; GFX1064GISEL-NEXT: s_brev_b32 s5, -2
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
@@ -1432,6 +1463,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064GISEL-NEXT: s_brev_b32 s5, -2
; GFX1064GISEL-NEXT: s_mov_b32 s4, -1
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1064GISEL-NEXT: s_mov_b32 s4, -1
+; GFX1064GISEL-NEXT: s_brev_b32 s5, -2
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX1064GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1454,6 +1490,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032DAGISEL: ; %bb.0: ; %entry
; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1032DAGISEL-NEXT: s_mov_b32 s4, -1
; GFX1032DAGISEL-NEXT: s_brev_b32 s5, -2
; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
@@ -1462,6 +1499,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032DAGISEL-NEXT: s_brev_b32 s5, -2
; GFX1032DAGISEL-NEXT: s_mov_b32 s4, -1
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1032DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX1032DAGISEL-NEXT: s_brev_b32 s5, -2
+; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1484,6 +1526,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032GISEL: ; %bb.0: ; %entry
; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1032GISEL-NEXT: s_mov_b32 s4, -1
; GFX1032GISEL-NEXT: s_brev_b32 s5, -2
; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
@@ -1492,6 +1535,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032GISEL-NEXT: s_brev_b32 s5, -2
; GFX1032GISEL-NEXT: s_mov_b32 s4, -1
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1032GISEL-NEXT: s_mov_b32 s4, -1
+; GFX1032GISEL-NEXT: s_brev_b32 s5, -2
+; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1514,6 +1562,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164DAGISEL: ; %bb.0: ; %entry
; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1164DAGISEL-NEXT: s_mov_b32 s0, -1
; GFX1164DAGISEL-NEXT: s_brev_b32 s1, -2
; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
@@ -1522,18 +1571,29 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
=======
; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164DAGISEL-NEXT: s_brev_b32 s1, -2
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164DAGISEL-NEXT: s_mov_b32 s0, -1
+; GFX1164DAGISEL-NEXT: s_brev_b32 s1, -2
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v4, s0
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v5, s1
; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s8
; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v3, s8
<<<<<<< HEAD
+<<<<<<< HEAD
=======
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164DAGISEL-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[4:5]
; GFX1164DAGISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s8
@@ -1550,6 +1610,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164GISEL: ; %bb.0: ; %entry
; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1164GISEL-NEXT: s_mov_b32 s0, -1
; GFX1164GISEL-NEXT: s_brev_b32 s1, -2
; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
@@ -1558,18 +1619,29 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
=======
; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164GISEL-NEXT: s_brev_b32 s1, -2
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164GISEL-NEXT: s_mov_b32 s0, -1
+; GFX1164GISEL-NEXT: s_brev_b32 s1, -2
+; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164GISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164GISEL-NEXT: v_mov_b32_e32 v4, s0
; GFX1164GISEL-NEXT: v_mov_b32_e32 v5, s1
; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s8
; GFX1164GISEL-NEXT: v_readlane_b32 s5, v3, s8
<<<<<<< HEAD
+<<<<<<< HEAD
=======
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164GISEL-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[4:5]
; GFX1164GISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s8
@@ -1586,6 +1658,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132DAGISEL: ; %bb.0: ; %entry
; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1132DAGISEL-NEXT: s_mov_b32 s0, -1
; GFX1132DAGISEL-NEXT: s_brev_b32 s1, -2
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
@@ -1594,17 +1667,28 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
=======
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132DAGISEL-NEXT: s_brev_b32 s1, -2
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132DAGISEL-NEXT: s_mov_b32 s0, -1
+; GFX1132DAGISEL-NEXT: s_brev_b32 s1, -2
+; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
<<<<<<< HEAD
+<<<<<<< HEAD
=======
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132DAGISEL-NEXT: v_cmp_lt_i64_e32 vcc_lo, s[4:5], v[4:5]
; GFX1132DAGISEL-NEXT: s_and_b32 s6, vcc_lo, s2
; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
@@ -1620,6 +1704,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132GISEL: ; %bb.0: ; %entry
; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1132GISEL-NEXT: s_mov_b32 s0, -1
; GFX1132GISEL-NEXT: s_brev_b32 s1, -2
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
@@ -1628,17 +1713,28 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
=======
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132GISEL-NEXT: s_brev_b32 s1, -2
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132GISEL-NEXT: s_mov_b32 s0, -1
+; GFX1132GISEL-NEXT: s_brev_b32 s1, -2
+; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132GISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
<<<<<<< HEAD
+<<<<<<< HEAD
=======
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132GISEL-NEXT: v_cmp_lt_i64_e32 vcc_lo, s[4:5], v[4:5]
; GFX1132GISEL-NEXT: s_and_b32 s6, vcc_lo, s2
; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
index 2bcd0c2232a1b..3fc5bb103873b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
@@ -1194,6 +1194,7 @@ entry:
ret void
}
+<<<<<<< HEAD
<<<<<<< HEAD
define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8DAGISEL-LABEL: divergent_value_i64:
@@ -1477,99 +1478,270 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132GISEL-NEXT: s_setpc_b64 s[30:31]
=======
define amdgpu_kernel void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
+=======
+define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX8DAGISEL-LABEL: divergent_value_i64:
; GFX8DAGISEL: ; %bb.0: ; %entry
-; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s2
-; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
-; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
-; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX8DAGISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX8DAGISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX8DAGISEL-NEXT: v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX8DAGISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX8DAGISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX8DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX8DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX8DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX8DAGISEL-NEXT: ; %bb.2:
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s5
; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
-; GFX8DAGISEL-NEXT: s_endpgm
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8DAGISEL-NEXT: s_setpc_b64 s[30:31]
;
; GFX8GISEL-LABEL: divergent_value_i64:
; GFX8GISEL: ; %bb.0: ; %entry
-; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s2
-; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
-; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s3
-; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
-; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
-; GFX8GISEL-NEXT: s_endpgm
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX8GISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX8GISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX8GISEL-NEXT: v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX8GISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX8GISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX8GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX8GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX8GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX8GISEL-NEXT: ; %bb.2:
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8GISEL-NEXT: s_setpc_b64 s[30:31]
;
; GFX9DAGISEL-LABEL: divergent_value_i64:
; GFX9DAGISEL: ; %bb.0: ; %entry
-; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
-; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
-; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
-; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
-; GFX9DAGISEL-NEXT: s_endpgm
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX9DAGISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX9DAGISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX9DAGISEL-NEXT: v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX9DAGISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX9DAGISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX9DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX9DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX9DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX9DAGISEL-NEXT: ; %bb.2:
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9DAGISEL-NEXT: s_setpc_b64 s[30:31]
;
; GFX9GISEL-LABEL: divergent_value_i64:
; GFX9GISEL: ; %bb.0: ; %entry
-; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
-; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
-; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s3
-; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
-; GFX9GISEL-NEXT: s_endpgm
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX9GISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX9GISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX9GISEL-NEXT: v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX9GISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX9GISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX9GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX9GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX9GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX9GISEL-NEXT: ; %bb.2:
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX9GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9GISEL-NEXT: s_setpc_b64 s[30:31]
;
-; GFX10DAGISEL-LABEL: divergent_value_i64:
-; GFX10DAGISEL: ; %bb.0: ; %entry
-; GFX10DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10DAGISEL-NEXT: v_mov_b32_e32 v2, 0
-; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, s2
-; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, s3
-; GFX10DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
-; GFX10DAGISEL-NEXT: s_endpgm
+; GFX1064DAGISEL-LABEL: divergent_value_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX1064DAGISEL-NEXT: v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX1064DAGISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX1064DAGISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX1064DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1064DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX1064DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1064DAGISEL-NEXT: ; %bb.2:
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064DAGISEL-NEXT: s_setpc_b64 s[30:31]
;
-; GFX10GISEL-LABEL: divergent_value_i64:
-; GFX10GISEL: ; %bb.0: ; %entry
-; GFX10GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10GISEL-NEXT: v_mov_b32_e32 v2, 0
-; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10GISEL-NEXT: v_mov_b32_e32 v0, s2
-; GFX10GISEL-NEXT: v_mov_b32_e32 v1, s3
-; GFX10GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
-; GFX10GISEL-NEXT: s_endpgm
+; GFX1064GISEL-LABEL: divergent_value_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX1064GISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX1064GISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX1064GISEL-NEXT: v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX1064GISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX1064GISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX1064GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1064GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX1064GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1064GISEL-NEXT: ; %bb.2:
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1064GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032DAGISEL-LABEL: divergent_value_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s8, v2, s7
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s9, v3, s7
+; GFX1032DAGISEL-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[8:9], v[4:5]
+; GFX1032DAGISEL-NEXT: s_and_b32 s10, vcc_lo, s6
+; GFX1032DAGISEL-NEXT: s_bitset0_b32 s6, s7
+; GFX1032DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1032DAGISEL-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1032DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1032DAGISEL-NEXT: ; %bb.2:
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032GISEL-LABEL: divergent_value_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b64 s[4:5], 0
+; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX1032GISEL-NEXT: v_readlane_b32 s8, v2, s7
+; GFX1032GISEL-NEXT: v_readlane_b32 s9, v3, s7
+; GFX1032GISEL-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[8:9], v[4:5]
+; GFX1032GISEL-NEXT: s_and_b32 s10, vcc_lo, s6
+; GFX1032GISEL-NEXT: s_bitset0_b32 s6, s7
+; GFX1032GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1032GISEL-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1032GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1032GISEL-NEXT: ; %bb.2:
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032GISEL-NEXT: s_setpc_b64 s[30:31]
;
; GFX1164DAGISEL-LABEL: divergent_value_i64:
; GFX1164DAGISEL: ; %bb.0: ; %entry
-; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
-; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
-; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
-; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
-; GFX1164DAGISEL-NEXT: s_endpgm
+; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v4, s0
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v5, s1
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s8
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v3, s8
+; GFX1164DAGISEL-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[4:5]
+; GFX1164DAGISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
+; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s8
+; GFX1164DAGISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1164DAGISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1164DAGISEL-NEXT: ; %bb.2:
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX1164DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164DAGISEL-NEXT: s_setpc_b64 s[30:31]
;
; GFX1164GISEL-LABEL: divergent_value_i64:
; GFX1164GISEL: ; %bb.0: ; %entry
-; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
-; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
-; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
-; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
-; GFX1164GISEL-NEXT: s_endpgm
+; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1164GISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v4, s0
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v5, s1
+; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s8
+; GFX1164GISEL-NEXT: v_readlane_b32 s5, v3, s8
+; GFX1164GISEL-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[4:5]
+; GFX1164GISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
+; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s8
+; GFX1164GISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1164GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1164GISEL-NEXT: ; %bb.2:
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX1164GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164GISEL-NEXT: s_setpc_b64 s[30:31]
;
; GFX1132DAGISEL-LABEL: divergent_value_i64:
; GFX1132DAGISEL: ; %bb.0: ; %entry
-; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
-; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
-; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
-; GFX1132DAGISEL-NEXT: s_endpgm
+; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_mov_b64 s[0:1], 0
+; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132DAGISEL-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[4:5], v[4:5]
+; GFX1132DAGISEL-NEXT: s_and_b32 s6, vcc_lo, s2
+; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX1132DAGISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1132DAGISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1132DAGISEL-NEXT: ; %bb.2:
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132DAGISEL-NEXT: s_setpc_b64 s[30:31]
;
; GFX1132GISEL-LABEL: divergent_value_i64:
; GFX1132GISEL: ; %bb.0: ; %entry
+<<<<<<< HEAD
; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1577,6 +1749,27 @@ define amdgpu_kernel void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x)
; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b64 s[0:1], 0
+; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
+; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132GISEL-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[4:5], v[4:5]
+; GFX1132GISEL-NEXT: s_and_b32 s6, vcc_lo, s2
+; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX1132GISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1132GISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1132GISEL-NEXT: ; %bb.2:
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132GISEL-NEXT: s_setpc_b64 s[30:31]
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
entry:
%result = call i64 @llvm.amdgcn.wave.reduce.umax.i64(i64 %id.x, i32 1)
store i64 %result, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
index e641eeec4353c..29412bc1ed31d 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
@@ -1199,6 +1199,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8DAGISEL: ; %bb.0: ; %entry
; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX8DAGISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
=======
@@ -1206,6 +1207,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8DAGISEL-NEXT: s_mov_b32 s5, s4
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX8DAGISEL-NEXT: s_mov_b64 s[4:5], -1
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1229,6 +1234,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8GISEL: ; %bb.0: ; %entry
; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX8GISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
=======
@@ -1236,6 +1242,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8GISEL-NEXT: s_mov_b32 s5, s4
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX8GISEL-NEXT: s_mov_b64 s[4:5], -1
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX8GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1259,6 +1269,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9DAGISEL: ; %bb.0: ; %entry
; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
=======
@@ -1266,6 +1277,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX9DAGISEL-NEXT: s_mov_b32 s5, s4
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], -1
+; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1289,6 +1304,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9GISEL: ; %bb.0: ; %entry
; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX9GISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
=======
@@ -1296,6 +1312,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX9GISEL-NEXT: s_mov_b32 s5, s4
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX9GISEL-NEXT: s_mov_b64 s[4:5], -1
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX9GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1319,6 +1339,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064DAGISEL: ; %bb.0: ; %entry
; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
=======
@@ -1326,6 +1347,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1064DAGISEL-NEXT: s_mov_b32 s5, s4
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], -1
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1348,6 +1373,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064GISEL: ; %bb.0: ; %entry
; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
=======
@@ -1355,6 +1381,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1064GISEL-NEXT: s_mov_b32 s5, s4
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], -1
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX1064GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1377,6 +1407,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032DAGISEL: ; %bb.0: ; %entry
; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1032DAGISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
=======
@@ -1384,6 +1415,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
; GFX1032DAGISEL-NEXT: s_mov_b32 s5, s4
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1032DAGISEL-NEXT: s_mov_b64 s[4:5], -1
+; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1406,6 +1441,7 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032GISEL: ; %bb.0: ; %entry
; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1032GISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
=======
@@ -1413,6 +1449,10 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
; GFX1032GISEL-NEXT: s_mov_b32 s5, s4
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1032GISEL-NEXT: s_mov_b64 s[4:5], -1
+; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1435,25 +1475,35 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164DAGISEL: ; %bb.0: ; %entry
; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1164DAGISEL-NEXT: s_mov_b64 s[0:1], -1
; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
=======
; GFX1164DAGISEL-NEXT: s_mov_b32 s0, -1
+=======
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[0:1], -1
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT: s_mov_b32 s1, s0
; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v4, s0
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v5, s1
; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s8
; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v3, s8
<<<<<<< HEAD
+<<<<<<< HEAD
=======
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164DAGISEL-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[4:5]
; GFX1164DAGISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s8
@@ -1470,25 +1520,35 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164GISEL: ; %bb.0: ; %entry
; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1164GISEL-NEXT: s_mov_b64 s[0:1], -1
; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
=======
; GFX1164GISEL-NEXT: s_mov_b32 s0, -1
+=======
+; GFX1164GISEL-NEXT: s_mov_b64 s[0:1], -1
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT: s_mov_b32 s1, s0
; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164GISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164GISEL-NEXT: v_mov_b32_e32 v4, s0
; GFX1164GISEL-NEXT: v_mov_b32_e32 v5, s1
; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s8
; GFX1164GISEL-NEXT: v_readlane_b32 s5, v3, s8
<<<<<<< HEAD
+<<<<<<< HEAD
=======
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164GISEL-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[4:5]
; GFX1164GISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s8
@@ -1505,24 +1565,34 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132DAGISEL: ; %bb.0: ; %entry
; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1132DAGISEL-NEXT: s_mov_b64 s[0:1], -1
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
=======
; GFX1132DAGISEL-NEXT: s_mov_b32 s0, -1
+=======
+; GFX1132DAGISEL-NEXT: s_mov_b64 s[0:1], -1
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT: s_mov_b32 s1, s0
; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
<<<<<<< HEAD
+<<<<<<< HEAD
=======
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132DAGISEL-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[4:5], v[4:5]
; GFX1132DAGISEL-NEXT: s_and_b32 s6, vcc_lo, s2
; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
@@ -1538,24 +1608,34 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132GISEL: ; %bb.0: ; %entry
; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
<<<<<<< HEAD
+<<<<<<< HEAD
; GFX1132GISEL-NEXT: s_mov_b64 s[0:1], -1
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
=======
; GFX1132GISEL-NEXT: s_mov_b32 s0, -1
+=======
+; GFX1132GISEL-NEXT: s_mov_b64 s[0:1], -1
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT: s_mov_b32 s1, s0
; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+<<<<<<< HEAD
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132GISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
<<<<<<< HEAD
+<<<<<<< HEAD
=======
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
>>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
+=======
+>>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132GISEL-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[4:5], v[4:5]
; GFX1132GISEL-NEXT: s_and_b32 s6, vcc_lo, s2
; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
>From 3fdf40e85be4959d0e92c73fe5314ada64d261ce Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Sat, 19 Jul 2025 12:48:18 +0530
Subject: [PATCH 4/9] [AMDGPU] Extending wave reduction intrinsics for `i64`
types - 2
Supporting Arithmetic Operations: `add`, `sub`
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 155 +-
llvm/lib/Target/AMDGPU/SIInstructions.td | 2 +
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll | 1356 +++++++++++++
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll | 1711 ++++++++++++++++-
4 files changed, 3151 insertions(+), 73 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index dc9af5393bba7..7668c4c2169d5 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5281,7 +5281,9 @@ static uint32_t getIdentityValueFor32BitWaveReduction(unsigned Opc) {
case AMDGPU::S_MAX_I32:
return std::numeric_limits<int32_t>::min();
case AMDGPU::S_ADD_I32:
+ case AMDGPU::S_ADD_U64_PSEUDO:
case AMDGPU::S_SUB_I32:
+ case AMDGPU::S_SUB_U64_PSEUDO:
case AMDGPU::S_OR_B32:
case AMDGPU::S_XOR_B32:
return std::numeric_limits<uint32_t>::min();
@@ -5364,11 +5366,14 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
}
case AMDGPU::S_XOR_B32:
case AMDGPU::S_ADD_I32:
- case AMDGPU::S_SUB_I32: {
+ case AMDGPU::S_ADD_U64_PSEUDO:
+ case AMDGPU::S_SUB_I32:
+ case AMDGPU::S_SUB_U64_PSEUDO: {
const TargetRegisterClass *WaveMaskRegClass = TRI->getWaveMaskRegClass();
const TargetRegisterClass *DstRegClass = MRI.getRegClass(DstReg);
Register ExecMask = MRI.createVirtualRegister(WaveMaskRegClass);
- Register ActiveLanes = MRI.createVirtualRegister(DstRegClass);
+ Register ActiveLanes =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
bool IsWave32 = ST.isWave32();
unsigned MovOpc = IsWave32 ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
@@ -5376,39 +5381,39 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
unsigned CountReg =
IsWave32 ? AMDGPU::S_BCNT1_I32_B32 : AMDGPU::S_BCNT1_I32_B64;
- auto Exec =
BuildMI(BB, MI, DL, TII->get(MovOpc), ExecMask).addReg(ExecReg);
- auto NewAccumulator = BuildMI(BB, MI, DL, TII->get(CountReg), ActiveLanes)
- .addReg(Exec->getOperand(0).getReg());
+ auto NewAccumulator =
+ BuildMI(BB, MI, DL, TII->get(CountReg), ActiveLanes)
+ .addReg(ExecMask);
+
+ switch (Opc) {
+ case AMDGPU::S_XOR_B32: {
+ // Performing an XOR operation on a uniform value
+ // depends on the parity of the number of active lanes.
+ // For even parity, the result will be 0, for odd
+ // parity the result will be the same as the input value.
+ Register ParityRegister =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
- switch (Opc) {
- case AMDGPU::S_XOR_B32: {
- // Performing an XOR operation on a uniform value
- // depends on the parity of the number of active lanes.
- // For even parity, the result will be 0, for odd
- // parity the result will be the same as the input value.
- Register ParityRegister = MRI.createVirtualRegister(DstRegClass);
-
- auto ParityReg =
BuildMI(BB, MI, DL, TII->get(AMDGPU::S_AND_B32), ParityRegister)
.addReg(NewAccumulator->getOperand(0).getReg())
- .addImm(1);
- BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DstReg)
- .addReg(SrcReg)
- .addReg(ParityReg->getOperand(0).getReg());
- break;
- }
+ .addImm(1)
+ .setOperandDead(3); // Dead scc
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DstReg)
+ .addReg(SrcReg)
+ .addReg(ParityRegister);
+ break;
+ }
case AMDGPU::S_SUB_I32: {
Register NegatedVal = MRI.createVirtualRegister(DstRegClass);
// Take the negation of the source operand.
- auto InvertedValReg =
- BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), NegatedVal)
- .addImm(-1)
- .addReg(SrcReg);
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_SUB_I32), NegatedVal)
+ .addImm(0)
+ .addReg(SrcReg);
BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DstReg)
- .addReg(InvertedValReg->getOperand(0).getReg())
+ .addReg(NegatedVal)
.addReg(NewAccumulator->getOperand(0).getReg());
break;
}
@@ -5418,6 +5423,74 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
.addReg(NewAccumulator->getOperand(0).getReg());
break;
}
+ case AMDGPU::S_ADD_U64_PSEUDO:
+ case AMDGPU::S_SUB_U64_PSEUDO: {
+ Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ Register Op1H_Op0L_Reg =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ Register Op1L_Op0H_Reg =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ Register CarryReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ Register AddReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ Register NegatedValLo =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ Register NegatedValHi =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+
+ const TargetRegisterClass *Src1RC = MRI.getRegClass(SrcReg);
+ const TargetRegisterClass *Src1SubRC =
+ TRI->getSubRegisterClass(Src1RC, AMDGPU::sub0);
+
+ MachineOperand Op1L = TII->buildExtractSubRegOrImm(
+ MI, MRI, MI.getOperand(1), Src1RC, AMDGPU::sub0, Src1SubRC);
+ MachineOperand Op1H = TII->buildExtractSubRegOrImm(
+ MI, MRI, MI.getOperand(1), Src1RC, AMDGPU::sub1, Src1SubRC);
+
+ if (Opc == AMDGPU::S_SUB_U64_PSEUDO) {
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_SUB_I32), NegatedValLo)
+ .addImm(0)
+ .addReg(NewAccumulator->getOperand(0).getReg());
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_ASHR_I32), NegatedValHi)
+ .addReg(NegatedValLo)
+ .addImm(31)
+ .setOperandDead(3); // Dead scc
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), Op1L_Op0H_Reg)
+ .add(Op1L)
+ .addReg(NegatedValHi);
+ }
+ Register LowOpcode = Opc == AMDGPU::S_SUB_U64_PSEUDO
+ ? NegatedValLo
+ : NewAccumulator->getOperand(0).getReg();
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DestSub0)
+ .add(Op1L)
+ .addReg(LowOpcode);
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_HI_U32), CarryReg)
+ .add(Op1L)
+ .addReg(LowOpcode);
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), Op1H_Op0L_Reg)
+ .add(Op1H)
+ .addReg(LowOpcode);
+
+ Register HiVal = Opc == AMDGPU::S_SUB_U64_PSEUDO ? AddReg : DestSub1;
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_ADD_U32), HiVal)
+ .addReg(CarryReg)
+ .addReg(Op1H_Op0L_Reg)
+ .setOperandDead(3); // Dead scc
+
+ if (Opc == AMDGPU::S_SUB_U64_PSEUDO) {
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_ADD_U32), DestSub1)
+ .addReg(HiVal)
+ .addReg(Op1L_Op0H_Reg)
+ .setOperandDead(3); // Dead scc
+ }
+ BuildMI(BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), DstReg)
+ .addReg(DestSub0)
+ .addImm(AMDGPU::sub0)
+ .addReg(DestSub1)
+ .addImm(AMDGPU::sub1);
+ break;
+ }
}
RetBB = &BB;
}
@@ -5564,6 +5637,34 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
.addReg(Accumulator->getOperand(0).getReg());
break;
}
+ case ::AMDGPU::S_ADD_U64_PSEUDO:
+ case ::AMDGPU::S_SUB_U64_PSEUDO: {
+ unsigned newOpc1 = Opc == AMDGPU::S_ADD_U64_PSEUDO ? AMDGPU::S_ADD_U32
+ : AMDGPU::S_SUB_U32;
+ unsigned newOpc2 = Opc == AMDGPU::S_ADD_U64_PSEUDO ? AMDGPU::S_ADDC_U32
+ : AMDGPU::S_SUBB_U32;
+ Register DestLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ Register DestHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ MachineOperand Accumlo = TII->buildExtractSubRegOrImm(
+ MI, MRI, Accumulator->getOperand(0), DstRegClass, AMDGPU::sub0,
+ &AMDGPU::SReg_32RegClass);
+ MachineOperand Accumhi = TII->buildExtractSubRegOrImm(
+ MI, MRI, Accumulator->getOperand(0), DstRegClass, AMDGPU::sub1,
+ &AMDGPU::SReg_32RegClass);
+ BuildMI(*ComputeLoop, I, DL, TII->get(newOpc1), DestLo)
+ .add(Accumlo)
+ .addReg(LaneValueLo->getOperand(0).getReg());
+ BuildMI(*ComputeLoop, I, DL, TII->get(newOpc2), DestHi)
+ .add(Accumhi)
+ .addReg(LaneValueHi->getOperand(0).getReg());
+ NewAccumulator = BuildMI(*ComputeLoop, I, DL,
+ TII->get(TargetOpcode::REG_SEQUENCE), DstReg)
+ .addReg(DestLo)
+ .addImm(AMDGPU::sub0)
+ .addReg(DestHi)
+ .addImm(AMDGPU::sub1);
+ break;
+ }
}
}
// Manipulate the iterator to get the next active lane
@@ -5619,8 +5720,12 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_CMP_GT_I64_e64);
case AMDGPU::WAVE_REDUCE_ADD_PSEUDO_I32:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_ADD_I32);
+ case AMDGPU::WAVE_REDUCE_ADD_PSEUDO_U64:
+ return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_ADD_U64_PSEUDO);
case AMDGPU::WAVE_REDUCE_SUB_PSEUDO_I32:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_SUB_I32);
+ case AMDGPU::WAVE_REDUCE_SUB_PSEUDO_U64:
+ return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_SUB_U64_PSEUDO);
case AMDGPU::WAVE_REDUCE_AND_PSEUDO_B32:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_AND_B32);
case AMDGPU::WAVE_REDUCE_OR_PSEUDO_B32:
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 5449267748f5f..d6fe587e081cf 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -367,6 +367,8 @@ defvar Operations = [
WaveReduceOp<"min", "I64", i64, SGPR_64, VSrc_b64>,
WaveReduceOp<"umax", "U64", i64, SGPR_64, VSrc_b64>,
WaveReduceOp<"max", "I64", i64, SGPR_64, VSrc_b64>,
+ WaveReduceOp<"add", "U64", i64, SGPR_64, VSrc_b64>,
+ WaveReduceOp<"sub", "U64", i64, SGPR_64, VSrc_b64>,
];
foreach Op = Operations in {
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
index d2ca1d8136043..b6af8b4bb798d 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
@@ -1226,6 +1226,1362 @@ endif:
store i32 %combine, ptr addrspace(1) %out
ret void
}
+
+define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: uniform_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: s_mul_i32 s0, s2, s4
+; GFX8DAGISEL-NEXT: s_mul_hi_u32 s1, s2, s4
+; GFX8DAGISEL-NEXT: s_mul_i32 s2, s3, s4
+; GFX8DAGISEL-NEXT: s_add_u32 s1, s1, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: uniform_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s5, s[4:5]
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mul_i32 s4, s2, s5
+; GFX8GISEL-NEXT: s_mul_hi_u32 s2, s2, s5
+; GFX8GISEL-NEXT: s_mul_i32 s3, s3, s5
+; GFX8GISEL-NEXT: s_add_u32 s5, s2, s3
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: uniform_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s5, s[4:5]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_mul_i32 s4, s2, s5
+; GFX9DAGISEL-NEXT: s_mul_hi_u32 s2, s2, s5
+; GFX9DAGISEL-NEXT: s_mul_i32 s3, s3, s5
+; GFX9DAGISEL-NEXT: s_add_u32 s5, s2, s3
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: uniform_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s5, s[4:5]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mul_i32 s4, s2, s5
+; GFX9GISEL-NEXT: s_mul_hi_u32 s2, s2, s5
+; GFX9GISEL-NEXT: s_mul_i32 s3, s3, s5
+; GFX9GISEL-NEXT: s_add_u32 s5, s2, s3
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX1064DAGISEL-LABEL: uniform_value_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1064DAGISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1064DAGISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT: s_endpgm
+;
+; GFX1064GISEL-LABEL: uniform_value_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX1064GISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1064GISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1064GISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT: s_endpgm
+;
+; GFX1032DAGISEL-LABEL: uniform_value_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT: s_mov_b32 s4, exec_lo
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s4, s4
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX1032DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1032DAGISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1032DAGISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT: s_endpgm
+;
+; GFX1032GISEL-LABEL: uniform_value_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT: s_mov_b32 s4, exec_lo
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s4, s4
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX1032GISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1032GISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1032GISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: uniform_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1164DAGISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1164DAGISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: uniform_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX1164GISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1164GISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1164GISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: uniform_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: s_mov_b32 s4, exec_lo
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s4, s4
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX1132DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1132DAGISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1132DAGISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: uniform_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: s_mov_b32 s4, exec_lo
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s4, s4
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX1132GISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1132GISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1132GISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 %in, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+; Uniform (compile-time constant) input: for a uniform value the i64 add
+; reduction folds to value * popcount(exec). The 64-bit product is built from
+; 32-bit pieces: lo = s_mul_i32(123, cnt), hi = s_mul_hi_u32(123, cnt) plus the
+; cross term cnt * hi32(123) — which is zero here, hence the `s_mul_i32 .., 0`.
+; NOTE(review): the CHECK lines below appear autogenerated
+; (update_llc_test_checks.py style) — regenerate rather than hand-edit.
+define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
+; GFX8DAGISEL-LABEL: const_value_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX8DAGISEL-NEXT:    s_bcnt1_i32_b64 s3, s[2:3]
+; GFX8DAGISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
+; GFX8DAGISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s3
+; GFX8DAGISEL-NEXT:    s_mul_i32 s3, s3, 0
+; GFX8DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: const_value_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX8GISEL-NEXT:    s_bcnt1_i32_b64 s3, s[2:3]
+; GFX8GISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
+; GFX8GISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s3
+; GFX8GISEL-NEXT:    s_mul_i32 s3, s3, 0
+; GFX8GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: const_value_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX9DAGISEL-NEXT:    s_bcnt1_i32_b64 s3, s[2:3]
+; GFX9DAGISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
+; GFX9DAGISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s3
+; GFX9DAGISEL-NEXT:    s_mul_i32 s3, s3, 0
+; GFX9DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: const_value_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX9GISEL-NEXT:    s_bcnt1_i32_b64 s3, s[2:3]
+; GFX9GISEL-NEXT:    s_mul_i32 s2, s3, 0x7b
+; GFX9GISEL-NEXT:    s_mul_hi_u32 s4, 0x7b, s3
+; GFX9GISEL-NEXT:    s_mul_i32 s3, s3, 0
+; GFX9GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX1064DAGISEL-LABEL: const_value_i64:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064DAGISEL-NEXT:    s_mul_hi_u32 s3, 0x7b, s2
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s4, s2, 0
+; GFX1064DAGISEL-NEXT:    s_mulk_i32 s2, 0x7b
+; GFX1064DAGISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT:    s_endpgm
+;
+; GFX1064GISEL-LABEL: const_value_i64:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1064GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064GISEL-NEXT:    s_mul_hi_u32 s3, 0x7b, s2
+; GFX1064GISEL-NEXT:    s_mul_i32 s4, s2, 0
+; GFX1064GISEL-NEXT:    s_mulk_i32 s2, 0x7b
+; GFX1064GISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT:    s_endpgm
+;
+; GFX1032DAGISEL-LABEL: const_value_i64:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX1032DAGISEL-NEXT:    s_mul_hi_u32 s3, 0x7b, s2
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s4, s2, 0
+; GFX1032DAGISEL-NEXT:    s_mulk_i32 s2, 0x7b
+; GFX1032DAGISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT:    s_endpgm
+;
+; GFX1032GISEL-LABEL: const_value_i64:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1032GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX1032GISEL-NEXT:    s_mul_hi_u32 s3, 0x7b, s2
+; GFX1032GISEL-NEXT:    s_mul_i32 s4, s2, 0
+; GFX1032GISEL-NEXT:    s_mulk_i32 s2, 0x7b
+; GFX1032GISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT:    s_endpgm
+;
+; GFX1164DAGISEL-LABEL: const_value_i64:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    s_mul_hi_u32 s3, 0x7b, s2
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s4, s2, 0
+; GFX1164DAGISEL-NEXT:    s_mulk_i32 s2, 0x7b
+; GFX1164DAGISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT:    s_endpgm
+;
+; GFX1164GISEL-LABEL: const_value_i64:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT:    s_mul_hi_u32 s3, 0x7b, s2
+; GFX1164GISEL-NEXT:    s_mul_i32 s4, s2, 0
+; GFX1164GISEL-NEXT:    s_mulk_i32 s2, 0x7b
+; GFX1164GISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT:    s_endpgm
+;
+; GFX1132DAGISEL-LABEL: const_value_i64:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX1132DAGISEL-NEXT:    s_mul_hi_u32 s3, 0x7b, s2
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s4, s2, 0
+; GFX1132DAGISEL-NEXT:    s_mulk_i32 s2, 0x7b
+; GFX1132DAGISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT:    s_endpgm
+;
+; GFX1132GISEL-LABEL: const_value_i64:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    s_mul_hi_u32 s3, 0x7b, s2
+; GFX1132GISEL-NEXT:    s_mul_i32 s4, s2, 0
+; GFX1132GISEL-NEXT:    s_mulk_i32 s2, 0x7b
+; GFX1132GISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT:    s_endpgm
+entry:
+  %result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 123, i32 1)
+  store i64 %result, ptr addrspace(1) %out
+  ret void
+}
+
+; Poison input: no particular result is required, so the backend emits the
+; same uniform-value expansion (input * popcount(exec)) using whatever the
+; incoming kernel-argument registers hold (s0/s1 here).
+; NOTE(review): the CHECK lines below appear autogenerated
+; (update_llc_test_checks.py style) — regenerate rather than hand-edit.
+define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: poison_value_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX8DAGISEL-NEXT:    s_bcnt1_i32_b64 s3, s[2:3]
+; GFX8DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    s_mul_i32 s2, s0, s3
+; GFX8DAGISEL-NEXT:    s_mul_hi_u32 s4, s0, s3
+; GFX8DAGISEL-NEXT:    s_mul_i32 s3, s1, s3
+; GFX8DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT:    s_endpgm
+;
+; GFX8GISEL-LABEL: poison_value_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX8GISEL-NEXT:    s_bcnt1_i32_b64 s3, s[2:3]
+; GFX8GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_mul_i32 s2, s0, s3
+; GFX8GISEL-NEXT:    s_mul_hi_u32 s4, s0, s3
+; GFX8GISEL-NEXT:    s_mul_i32 s3, s1, s3
+; GFX8GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT:    s_endpgm
+;
+; GFX9DAGISEL-LABEL: poison_value_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX9DAGISEL-NEXT:    s_bcnt1_i32_b64 s3, s[2:3]
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    s_mul_i32 s2, s0, s3
+; GFX9DAGISEL-NEXT:    s_mul_hi_u32 s4, s0, s3
+; GFX9DAGISEL-NEXT:    s_mul_i32 s3, s1, s3
+; GFX9DAGISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT:    s_endpgm
+;
+; GFX9GISEL-LABEL: poison_value_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX9GISEL-NEXT:    s_bcnt1_i32_b64 s3, s[2:3]
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_mul_i32 s2, s0, s3
+; GFX9GISEL-NEXT:    s_mul_hi_u32 s4, s0, s3
+; GFX9GISEL-NEXT:    s_mul_i32 s3, s1, s3
+; GFX9GISEL-NEXT:    s_add_u32 s3, s4, s3
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT:    s_endpgm
+;
+; GFX1064DAGISEL-LABEL: poison_value_i64:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    s_mul_hi_u32 s3, s0, s2
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s4, s1, s2
+; GFX1064DAGISEL-NEXT:    s_mul_i32 s2, s0, s2
+; GFX1064DAGISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT:    s_endpgm
+;
+; GFX1064GISEL-LABEL: poison_value_i64:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1064GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_mul_hi_u32 s3, s0, s2
+; GFX1064GISEL-NEXT:    s_mul_i32 s4, s1, s2
+; GFX1064GISEL-NEXT:    s_mul_i32 s2, s0, s2
+; GFX1064GISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT:    s_endpgm
+;
+; GFX1032DAGISEL-LABEL: poison_value_i64:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX1032DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    s_mul_hi_u32 s3, s0, s2
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s4, s1, s2
+; GFX1032DAGISEL-NEXT:    s_mul_i32 s2, s0, s2
+; GFX1032DAGISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT:    s_endpgm
+;
+; GFX1032GISEL-LABEL: poison_value_i64:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1032GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX1032GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_mul_hi_u32 s3, s0, s2
+; GFX1032GISEL-NEXT:    s_mul_i32 s4, s1, s2
+; GFX1032GISEL-NEXT:    s_mul_i32 s2, s0, s2
+; GFX1032GISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT:    s_endpgm
+;
+; GFX1164DAGISEL-LABEL: poison_value_i64:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    s_mul_hi_u32 s3, s0, s2
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s4, s1, s2
+; GFX1164DAGISEL-NEXT:    s_mul_i32 s2, s0, s2
+; GFX1164DAGISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT:    s_endpgm
+;
+; GFX1164GISEL-LABEL: poison_value_i64:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_mul_hi_u32 s3, s0, s2
+; GFX1164GISEL-NEXT:    s_mul_i32 s4, s1, s2
+; GFX1164GISEL-NEXT:    s_mul_i32 s2, s0, s2
+; GFX1164GISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT:    s_endpgm
+;
+; GFX1132DAGISEL-LABEL: poison_value_i64:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX1132DAGISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    s_mul_hi_u32 s3, s0, s2
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s4, s1, s2
+; GFX1132DAGISEL-NEXT:    s_mul_i32 s2, s0, s2
+; GFX1132DAGISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX1132DAGISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT:    s_endpgm
+;
+; GFX1132GISEL-LABEL: poison_value_i64:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX1132GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_mul_hi_u32 s3, s0, s2
+; GFX1132GISEL-NEXT:    s_mul_i32 s4, s1, s2
+; GFX1132GISEL-NEXT:    s_mul_i32 s2, s0, s2
+; GFX1132GISEL-NEXT:    s_add_u32 s3, s3, s4
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT:    s_endpgm
+entry:
+  %result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 poison, i32 1)
+  store i64 %result, ptr addrspace(1) %out
+  ret void
+}
+
+; Divergent input: the reduction cannot be folded to a multiply, so the
+; backend emits a scalar loop over the live lanes — find the lowest set exec
+; bit (s_ff1/s_ctz), read that lane's 64-bit value with two v_readlane_b32
+; (low and high halves), accumulate with s_add_u32/s_addc_u32 (64-bit add
+; with carry), clear the bit, and repeat until the mask is empty.
+; NOTE(review): the CHECK lines below appear autogenerated
+; (update_llc_test_checks.py style) — regenerate rather than hand-edit.
+define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
+; GFX8DAGISEL-LABEL: divergent_value_i64:
+; GFX8DAGISEL:       ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX8DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT:    s_mov_b32 s5, s4
+; GFX8DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
+; GFX8DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
+; GFX8DAGISEL-NEXT:    v_readlane_b32 s10, v3, s8
+; GFX8DAGISEL-NEXT:    s_add_u32 s4, s4, s9
+; GFX8DAGISEL-NEXT:    s_bitset0_b64 s[6:7], s8
+; GFX8DAGISEL-NEXT:    s_addc_u32 s5, s5, s10
+; GFX8DAGISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX8DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX8DAGISEL-NEXT:  ; %bb.2:
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX8DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX8DAGISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX8DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8GISEL-LABEL: divergent_value_i64:
+; GFX8GISEL:       ; %bb.0: ; %entry
+; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX8GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT:    s_mov_b32 s5, s4
+; GFX8GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
+; GFX8GISEL-NEXT:    v_readlane_b32 s9, v2, s8
+; GFX8GISEL-NEXT:    v_readlane_b32 s10, v3, s8
+; GFX8GISEL-NEXT:    s_add_u32 s4, s4, s9
+; GFX8GISEL-NEXT:    s_bitset0_b64 s[6:7], s8
+; GFX8GISEL-NEXT:    s_addc_u32 s5, s5, s10
+; GFX8GISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX8GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX8GISEL-NEXT:  ; %bb.2:
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX8GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX8GISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX8GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9DAGISEL-LABEL: divergent_value_i64:
+; GFX9DAGISEL:       ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX9DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT:    s_mov_b32 s5, s4
+; GFX9DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
+; GFX9DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
+; GFX9DAGISEL-NEXT:    v_readlane_b32 s10, v3, s8
+; GFX9DAGISEL-NEXT:    s_add_u32 s4, s4, s9
+; GFX9DAGISEL-NEXT:    s_bitset0_b64 s[6:7], s8
+; GFX9DAGISEL-NEXT:    s_addc_u32 s5, s5, s10
+; GFX9DAGISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX9DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX9DAGISEL-NEXT:  ; %bb.2:
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9DAGISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX9DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9GISEL-LABEL: divergent_value_i64:
+; GFX9GISEL:       ; %bb.0: ; %entry
+; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX9GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT:    s_mov_b32 s5, s4
+; GFX9GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
+; GFX9GISEL-NEXT:    v_readlane_b32 s9, v2, s8
+; GFX9GISEL-NEXT:    v_readlane_b32 s10, v3, s8
+; GFX9GISEL-NEXT:    s_add_u32 s4, s4, s9
+; GFX9GISEL-NEXT:    s_bitset0_b64 s[6:7], s8
+; GFX9GISEL-NEXT:    s_addc_u32 s5, s5, s10
+; GFX9GISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX9GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX9GISEL-NEXT:  ; %bb.2:
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX9GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1064DAGISEL-LABEL: divergent_value_i64:
+; GFX1064DAGISEL:       ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1064DAGISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT:    s_mov_b32 s5, s4
+; GFX1064DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064DAGISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
+; GFX1064DAGISEL-NEXT:    v_readlane_b32 s9, v2, s8
+; GFX1064DAGISEL-NEXT:    v_readlane_b32 s10, v3, s8
+; GFX1064DAGISEL-NEXT:    s_bitset0_b64 s[6:7], s8
+; GFX1064DAGISEL-NEXT:    s_add_u32 s4, s4, s9
+; GFX1064DAGISEL-NEXT:    s_addc_u32 s5, s5, s10
+; GFX1064DAGISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX1064DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1064DAGISEL-NEXT:  ; %bb.2:
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1064DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1064DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1064GISEL-LABEL: divergent_value_i64:
+; GFX1064GISEL:       ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1064GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT:    s_mov_b32 s5, s4
+; GFX1064GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064GISEL-NEXT:    s_ff1_i32_b64 s8, s[6:7]
+; GFX1064GISEL-NEXT:    v_readlane_b32 s9, v2, s8
+; GFX1064GISEL-NEXT:    v_readlane_b32 s10, v3, s8
+; GFX1064GISEL-NEXT:    s_bitset0_b64 s[6:7], s8
+; GFX1064GISEL-NEXT:    s_add_u32 s4, s4, s9
+; GFX1064GISEL-NEXT:    s_addc_u32 s5, s5, s10
+; GFX1064GISEL-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX1064GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1064GISEL-NEXT:  ; %bb.2:
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1064GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1064GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1032DAGISEL-LABEL: divergent_value_i64:
+; GFX1032DAGISEL:       ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT:    s_mov_b32 s5, s4
+; GFX1032DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032DAGISEL-NEXT:    s_ff1_i32_b32 s7, s6
+; GFX1032DAGISEL-NEXT:    v_readlane_b32 s8, v2, s7
+; GFX1032DAGISEL-NEXT:    v_readlane_b32 s9, v3, s7
+; GFX1032DAGISEL-NEXT:    s_bitset0_b32 s6, s7
+; GFX1032DAGISEL-NEXT:    s_add_u32 s4, s4, s8
+; GFX1032DAGISEL-NEXT:    s_addc_u32 s5, s5, s9
+; GFX1032DAGISEL-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1032DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1032DAGISEL-NEXT:  ; %bb.2:
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1032DAGISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1032DAGISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1032GISEL-LABEL: divergent_value_i64:
+; GFX1032GISEL:       ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032GISEL-NEXT:    s_mov_b32 s4, 0
+; GFX1032GISEL-NEXT:    s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT:    s_mov_b32 s5, s4
+; GFX1032GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032GISEL-NEXT:    s_ff1_i32_b32 s7, s6
+; GFX1032GISEL-NEXT:    v_readlane_b32 s8, v2, s7
+; GFX1032GISEL-NEXT:    v_readlane_b32 s9, v3, s7
+; GFX1032GISEL-NEXT:    s_bitset0_b32 s6, s7
+; GFX1032GISEL-NEXT:    s_add_u32 s4, s4, s8
+; GFX1032GISEL-NEXT:    s_addc_u32 s5, s5, s9
+; GFX1032GISEL-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1032GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1032GISEL-NEXT:  ; %bb.2:
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1032GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1032GISEL-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1164DAGISEL-LABEL: divergent_value_i64:
+; GFX1164DAGISEL:       ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1164DAGISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1164DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
+; GFX1164DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT:    v_readlane_b32 s5, v2, s4
+; GFX1164DAGISEL-NEXT:    v_readlane_b32 s6, v3, s4
+; GFX1164DAGISEL-NEXT:    s_bitset0_b64 s[2:3], s4
+; GFX1164DAGISEL-NEXT:    s_add_u32 s0, s0, s5
+; GFX1164DAGISEL-NEXT:    s_addc_u32 s1, s1, s6
+; GFX1164DAGISEL-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; GFX1164DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1164DAGISEL-NEXT:  ; %bb.2:
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX1164DAGISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX1164DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1164DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1164GISEL-LABEL: divergent_value_i64:
+; GFX1164GISEL:       ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1164GISEL-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1164GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT:    s_ctz_i32_b64 s4, s[2:3]
+; GFX1164GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT:    v_readlane_b32 s5, v2, s4
+; GFX1164GISEL-NEXT:    v_readlane_b32 s6, v3, s4
+; GFX1164GISEL-NEXT:    s_bitset0_b64 s[2:3], s4
+; GFX1164GISEL-NEXT:    s_add_u32 s0, s0, s5
+; GFX1164GISEL-NEXT:    s_addc_u32 s1, s1, s6
+; GFX1164GISEL-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; GFX1164GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1164GISEL-NEXT:  ; %bb.2:
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX1164GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX1164GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1164GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1132DAGISEL-LABEL: divergent_value_i64:
+; GFX1132DAGISEL:       ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1132DAGISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX1132DAGISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT:    v_readlane_b32 s4, v2, s3
+; GFX1132DAGISEL-NEXT:    v_readlane_b32 s5, v3, s3
+; GFX1132DAGISEL-NEXT:    s_bitset0_b32 s2, s3
+; GFX1132DAGISEL-NEXT:    s_add_u32 s0, s0, s4
+; GFX1132DAGISEL-NEXT:    s_addc_u32 s1, s1, s5
+; GFX1132DAGISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX1132DAGISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1132DAGISEL-NEXT:  ; %bb.2:
+; GFX1132DAGISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132DAGISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1132DAGISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1132GISEL-LABEL: divergent_value_i64:
+; GFX1132GISEL:       ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132GISEL-NEXT:    s_mov_b32 s0, 0
+; GFX1132GISEL-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT:    s_mov_b32 s1, s0
+; GFX1132GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT:    s_ctz_i32_b32 s3, s2
+; GFX1132GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT:    v_readlane_b32 s4, v2, s3
+; GFX1132GISEL-NEXT:    v_readlane_b32 s5, v3, s3
+; GFX1132GISEL-NEXT:    s_bitset0_b32 s2, s3
+; GFX1132GISEL-NEXT:    s_add_u32 s0, s0, s4
+; GFX1132GISEL-NEXT:    s_addc_u32 s1, s1, s5
+; GFX1132GISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX1132GISEL-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX1132GISEL-NEXT:  ; %bb.2:
+; GFX1132GISEL-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132GISEL-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX1132GISEL-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 %id.x, i32 1)
+  store i64 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64 %in2) {
+; GFX8DAGISEL-LABEL: divergent_cfg_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX8DAGISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX8DAGISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX8DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s7, s[6:7]
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_mul_i32 s6, s2, s7
+; GFX8DAGISEL-NEXT: s_mul_hi_u32 s2, s2, s7
+; GFX8DAGISEL-NEXT: s_mul_i32 s3, s3, s7
+; GFX8DAGISEL-NEXT: s_add_u32 s7, s2, s3
+; GFX8DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[8:9]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX8DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
+; GFX8DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s7, s[6:7]
+; GFX8DAGISEL-NEXT: s_mul_i32 s6, s4, s7
+; GFX8DAGISEL-NEXT: s_mul_hi_u32 s4, s4, s7
+; GFX8DAGISEL-NEXT: s_mul_i32 s5, s5, s7
+; GFX8DAGISEL-NEXT: s_add_u32 s7, s4, s5
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX8DAGISEL-NEXT: ; %bb.4: ; %endif
+; GFX8DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: divergent_cfg_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX8GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX8GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX8GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX8GISEL-NEXT: ; %bb.1: ; %else
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s7, s[6:7]
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mul_i32 s6, s2, s7
+; GFX8GISEL-NEXT: s_mul_hi_u32 s2, s2, s7
+; GFX8GISEL-NEXT: s_mul_i32 s3, s3, s7
+; GFX8GISEL-NEXT: s_add_u32 s7, s2, s3
+; GFX8GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX8GISEL-NEXT: ; %bb.3: ; %if
+; GFX8GISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s7, s[6:7]
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mul_i32 s6, s4, s7
+; GFX8GISEL-NEXT: s_mul_hi_u32 s4, s4, s7
+; GFX8GISEL-NEXT: s_mul_i32 s5, s5, s7
+; GFX8GISEL-NEXT: s_add_u32 s7, s4, s5
+; GFX8GISEL-NEXT: .LBB9_4: ; %endif
+; GFX8GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: divergent_cfg_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX9DAGISEL-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9DAGISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX9DAGISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX9DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s5, s[4:5]
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_mul_i32 s4, s2, s5
+; GFX9DAGISEL-NEXT: s_mul_hi_u32 s2, s2, s5
+; GFX9DAGISEL-NEXT: s_mul_i32 s3, s3, s5
+; GFX9DAGISEL-NEXT: s_add_u32 s5, s2, s3
+; GFX9DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[8:9]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
+; GFX9DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s5, s[4:5]
+; GFX9DAGISEL-NEXT: s_mul_i32 s4, s6, s5
+; GFX9DAGISEL-NEXT: s_mul_hi_u32 s6, s6, s5
+; GFX9DAGISEL-NEXT: s_mul_i32 s5, s7, s5
+; GFX9DAGISEL-NEXT: s_add_u32 s5, s6, s5
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9DAGISEL-NEXT: ; %bb.4: ; %endif
+; GFX9DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: divergent_cfg_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX9GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX9GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX9GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX9GISEL-NEXT: ; %bb.1: ; %else
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s7, s[6:7]
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mul_i32 s6, s2, s7
+; GFX9GISEL-NEXT: s_mul_hi_u32 s2, s2, s7
+; GFX9GISEL-NEXT: s_mul_i32 s3, s3, s7
+; GFX9GISEL-NEXT: s_add_u32 s7, s2, s3
+; GFX9GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX9GISEL-NEXT: ; %bb.3: ; %if
+; GFX9GISEL-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
+; GFX9GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mul_i32 s6, s8, s4
+; GFX9GISEL-NEXT: s_mul_hi_u32 s5, s8, s4
+; GFX9GISEL-NEXT: s_mul_i32 s4, s9, s4
+; GFX9GISEL-NEXT: s_add_u32 s7, s5, s4
+; GFX9GISEL-NEXT: .LBB9_4: ; %endif
+; GFX9GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX1064DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_clause 0x1
+; GFX1064DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX1064DAGISEL-NEXT: ; implicit-def: $sgpr8_sgpr9
+; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064DAGISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1064DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[8:9], exec
+; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s8, s[8:9]
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_mul_hi_u32 s9, s2, s8
+; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s3, s8
+; GFX1064DAGISEL-NEXT: s_mul_i32 s8, s2, s8
+; GFX1064DAGISEL-NEXT: s_add_u32 s9, s9, s3
+; GFX1064DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[4:5]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s8
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s9
+; GFX1064DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
+; GFX1064DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064DAGISEL-NEXT: s_mul_hi_u32 s5, s6, s4
+; GFX1064DAGISEL-NEXT: s_mul_i32 s7, s7, s4
+; GFX1064DAGISEL-NEXT: s_mul_i32 s4, s6, s4
+; GFX1064DAGISEL-NEXT: s_add_u32 s5, s5, s7
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX1064DAGISEL-NEXT: ; %bb.4: ; %endif
+; GFX1064DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT: s_endpgm
+;
+; GFX1064GISEL-LABEL: divergent_cfg_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX1064GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1064GISEL-NEXT: ; %bb.1: ; %else
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mul_hi_u32 s7, s2, s6
+; GFX1064GISEL-NEXT: s_mul_i32 s3, s3, s6
+; GFX1064GISEL-NEXT: s_mul_i32 s6, s2, s6
+; GFX1064GISEL-NEXT: s_add_u32 s7, s7, s3
+; GFX1064GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1064GISEL-NEXT: ; %bb.3: ; %if
+; GFX1064GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mul_hi_u32 s5, s6, s4
+; GFX1064GISEL-NEXT: s_mul_i32 s7, s7, s4
+; GFX1064GISEL-NEXT: s_mul_i32 s6, s6, s4
+; GFX1064GISEL-NEXT: s_add_u32 s7, s5, s7
+; GFX1064GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1064GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT: s_endpgm
+;
+; GFX1032DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_clause 0x1
+; GFX1032DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc_lo, 15, v0
+; GFX1032DAGISEL-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s8, vcc_lo
+; GFX1032DAGISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1032DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX1032DAGISEL-NEXT: s_mov_b32 s4, exec_lo
+; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s4, s4
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX1032DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1032DAGISEL-NEXT: s_mul_i32 s4, s2, s4
+; GFX1032DAGISEL-NEXT: s_add_u32 s5, s5, s3
+; GFX1032DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s2, s8
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX1032DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s2
+; GFX1032DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX1032DAGISEL-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1032DAGISEL-NEXT: s_mul_hi_u32 s5, s6, s3
+; GFX1032DAGISEL-NEXT: s_mul_i32 s7, s7, s3
+; GFX1032DAGISEL-NEXT: s_mul_i32 s4, s6, s3
+; GFX1032DAGISEL-NEXT: s_add_u32 s5, s5, s7
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX1032DAGISEL-NEXT: ; %bb.4: ; %endif
+; GFX1032DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT: s_endpgm
+;
+; GFX1032GISEL-LABEL: divergent_cfg_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 16, v0
+; GFX1032GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1032GISEL-NEXT: s_and_saveexec_b32 s8, vcc_lo
+; GFX1032GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1032GISEL-NEXT: ; %bb.1: ; %else
+; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s6, s6
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mul_hi_u32 s7, s2, s6
+; GFX1032GISEL-NEXT: s_mul_i32 s3, s3, s6
+; GFX1032GISEL-NEXT: s_mul_i32 s6, s2, s6
+; GFX1032GISEL-NEXT: s_add_u32 s7, s7, s3
+; GFX1032GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s2, s8
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1032GISEL-NEXT: ; %bb.3: ; %if
+; GFX1032GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032GISEL-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mul_hi_u32 s4, s6, s3
+; GFX1032GISEL-NEXT: s_mul_i32 s5, s7, s3
+; GFX1032GISEL-NEXT: s_mul_i32 s6, s6, s3
+; GFX1032GISEL-NEXT: s_add_u32 s7, s4, s5
+; GFX1032GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1032GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_clause 0x1
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT: ; implicit-def: $sgpr8_sgpr9
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX1164DAGISEL-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
+; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1164DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[8:9], exec
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s8, s[8:9]
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_mul_hi_u32 s9, s2, s8
+; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s3, s8
+; GFX1164DAGISEL-NEXT: s_mul_i32 s8, s2, s8
+; GFX1164DAGISEL-NEXT: s_add_u32 s9, s9, s3
+; GFX1164DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[6:7]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s8
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s9
+; GFX1164DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
+; GFX1164DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: s_mul_hi_u32 s7, s4, s6
+; GFX1164DAGISEL-NEXT: s_mul_i32 s5, s5, s6
+; GFX1164DAGISEL-NEXT: s_mul_i32 s4, s4, s6
+; GFX1164DAGISEL-NEXT: s_add_u32 s5, s7, s5
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX1164DAGISEL-NEXT: ; %bb.4: ; %endif
+; GFX1164DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: divergent_cfg_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164GISEL-NEXT: s_mov_b64 s[8:9], exec
+; GFX1164GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
+; GFX1164GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1164GISEL-NEXT: ; %bb.1: ; %else
+; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mul_hi_u32 s7, s2, s6
+; GFX1164GISEL-NEXT: s_mul_i32 s3, s3, s6
+; GFX1164GISEL-NEXT: s_mul_i32 s6, s2, s6
+; GFX1164GISEL-NEXT: s_add_u32 s7, s7, s3
+; GFX1164GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[2:3], s[8:9]
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1164GISEL-NEXT: ; %bb.3: ; %if
+; GFX1164GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mul_hi_u32 s7, s4, s6
+; GFX1164GISEL-NEXT: s_mul_i32 s5, s5, s6
+; GFX1164GISEL-NEXT: s_mul_i32 s6, s4, s6
+; GFX1164GISEL-NEXT: s_add_u32 s7, s7, s5
+; GFX1164GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1164GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_clause 0x1
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132DAGISEL-NEXT: s_mov_b32 s8, exec_lo
+; GFX1132DAGISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX1132DAGISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1132DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX1132DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s6, s6
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_mul_hi_u32 s7, s2, s6
+; GFX1132DAGISEL-NEXT: s_mul_i32 s3, s3, s6
+; GFX1132DAGISEL-NEXT: s_mul_i32 s6, s2, s6
+; GFX1132DAGISEL-NEXT: s_add_u32 s7, s7, s3
+; GFX1132DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s2, s8
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s2
+; GFX1132DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_mul_hi_u32 s6, s4, s3
+; GFX1132DAGISEL-NEXT: s_mul_i32 s5, s5, s3
+; GFX1132DAGISEL-NEXT: s_mul_i32 s4, s4, s3
+; GFX1132DAGISEL-NEXT: s_add_u32 s5, s6, s5
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX1132DAGISEL-NEXT: ; %bb.4: ; %endif
+; GFX1132DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: divergent_cfg_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132GISEL-NEXT: s_mov_b32 s8, exec_lo
+; GFX1132GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
+; GFX1132GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1132GISEL-NEXT: ; %bb.1: ; %else
+; GFX1132GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s6, s6
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mul_hi_u32 s7, s2, s6
+; GFX1132GISEL-NEXT: s_mul_i32 s3, s3, s6
+; GFX1132GISEL-NEXT: s_mul_i32 s6, s2, s6
+; GFX1132GISEL-NEXT: s_add_u32 s7, s7, s3
+; GFX1132GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s2, s8
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1132GISEL-NEXT: ; %bb.3: ; %if
+; GFX1132GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132GISEL-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mul_hi_u32 s7, s4, s3
+; GFX1132GISEL-NEXT: s_mul_i32 s5, s5, s3
+; GFX1132GISEL-NEXT: s_mul_i32 s6, s4, s3
+; GFX1132GISEL-NEXT: s_add_u32 s7, s7, s5
+; GFX1132GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1132GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %d_cmp = icmp ult i32 %tid, 16
+ br i1 %d_cmp, label %if, label %else
+
+if:
+ %reducedValTid = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 %in2, i32 1)
+ br label %endif
+
+else:
+ %reducedValIn = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 %in, i32 1)
+ br label %endif
+
+endif:
+ %combine = phi i64 [%reducedValTid, %if], [%reducedValIn, %else]
+ store i64 %combine, ptr addrspace(1) %out
+ ret void
+}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX10DAGISEL: {{.*}}
; GFX10GISEL: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
index edb888a21f735..9a0917133fc59 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
@@ -20,7 +20,7 @@ define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) {
; GFX8DAGISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8DAGISEL-NEXT: s_mul_i32 s3, s6, -1
+; GFX8DAGISEL-NEXT: s_sub_i32 s3, 0, s6
; GFX8DAGISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
@@ -35,7 +35,7 @@ define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) {
; GFX8GISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8GISEL-NEXT: s_mul_i32 s3, s6, -1
+; GFX8GISEL-NEXT: s_sub_i32 s3, 0, s6
; GFX8GISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s2
@@ -51,7 +51,7 @@ define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) {
; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9DAGISEL-NEXT: s_mul_i32 s3, s6, -1
+; GFX9DAGISEL-NEXT: s_sub_i32 s3, 0, s6
; GFX9DAGISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s2
; GFX9DAGISEL-NEXT: global_store_dword v0, v1, s[0:1]
@@ -65,7 +65,7 @@ define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) {
; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX9GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9GISEL-NEXT: s_mul_i32 s3, s6, -1
+; GFX9GISEL-NEXT: s_sub_i32 s3, 0, s6
; GFX9GISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX9GISEL-NEXT: global_store_dword v1, v0, s[0:1]
@@ -80,7 +80,7 @@ define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) {
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s6, -1
+; GFX1064DAGISEL-NEXT: s_sub_i32 s3, 0, s6
; GFX1064DAGISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s2
; GFX1064DAGISEL-NEXT: global_store_dword v0, v1, s[0:1]
@@ -95,7 +95,7 @@ define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) {
; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064GISEL-NEXT: s_mul_i32 s3, s6, -1
+; GFX1064GISEL-NEXT: s_sub_i32 s3, 0, s6
; GFX1064GISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1064GISEL-NEXT: global_store_dword v1, v0, s[0:1]
@@ -110,7 +110,7 @@ define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) {
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s3, s3
; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032DAGISEL-NEXT: s_mul_i32 s2, s2, -1
+; GFX1032DAGISEL-NEXT: s_sub_i32 s2, 0, s2
; GFX1032DAGISEL-NEXT: s_mul_i32 s2, s2, s3
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s2
; GFX1032DAGISEL-NEXT: global_store_dword v0, v1, s[0:1]
@@ -125,7 +125,7 @@ define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) {
; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s3, s3
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032GISEL-NEXT: s_mul_i32 s2, s2, -1
+; GFX1032GISEL-NEXT: s_sub_i32 s2, 0, s2
; GFX1032GISEL-NEXT: s_mul_i32 s2, s2, s3
; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1032GISEL-NEXT: global_store_dword v1, v0, s[0:1]
@@ -140,7 +140,7 @@ define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) {
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s6, -1
+; GFX1164DAGISEL-NEXT: s_sub_i32 s3, 0, s6
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164DAGISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s2
@@ -156,7 +156,7 @@ define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) {
; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164GISEL-NEXT: s_mul_i32 s3, s6, -1
+; GFX1164GISEL-NEXT: s_sub_i32 s3, 0, s6
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164GISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
@@ -172,7 +172,7 @@ define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) {
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s3, s3
; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132DAGISEL-NEXT: s_mul_i32 s2, s2, -1
+; GFX1132DAGISEL-NEXT: s_sub_i32 s2, 0, s2
; GFX1132DAGISEL-NEXT: s_mul_i32 s2, s2, s3
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
@@ -188,7 +188,7 @@ define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) {
; GFX1132GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s3, s3
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132GISEL-NEXT: s_mul_i32 s2, s2, -1
+; GFX1132GISEL-NEXT: s_sub_i32 s2, 0, s2
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1132GISEL-NEXT: s_mul_i32 s2, s2, s3
; GFX1132GISEL-NEXT: v_mov_b32_e32 v0, s2
@@ -205,7 +205,7 @@ define amdgpu_kernel void @const_value(ptr addrspace(1) %out) {
; GFX8DAGISEL: ; %bb.0: ; %entry
; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8DAGISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX8DAGISEL-NEXT: s_mul_i32 s4, -1, 0x7b
+; GFX8DAGISEL-NEXT: s_sub_i32 s4, 0, 0x7b
; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX8DAGISEL-NEXT: s_mul_i32 s2, s4, s2
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -220,7 +220,7 @@ define amdgpu_kernel void @const_value(ptr addrspace(1) %out) {
; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8GISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
-; GFX8GISEL-NEXT: s_mul_i32 s3, -1, 0x7b
+; GFX8GISEL-NEXT: s_sub_i32 s3, 0, 0x7b
; GFX8GISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s0
@@ -234,7 +234,7 @@ define amdgpu_kernel void @const_value(ptr addrspace(1) %out) {
; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX9DAGISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
-; GFX9DAGISEL-NEXT: s_mul_i32 s3, -1, 0x7b
+; GFX9DAGISEL-NEXT: s_sub_i32 s3, 0, 0x7b
; GFX9DAGISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s2
@@ -247,7 +247,7 @@ define amdgpu_kernel void @const_value(ptr addrspace(1) %out) {
; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX9GISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
-; GFX9GISEL-NEXT: s_mul_i32 s3, -1, 0x7b
+; GFX9GISEL-NEXT: s_sub_i32 s3, 0, 0x7b
; GFX9GISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX9GISEL-NEXT: v_mov_b32_e32 v1, 0
@@ -261,7 +261,7 @@ define amdgpu_kernel void @const_value(ptr addrspace(1) %out) {
; GFX1064DAGISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
-; GFX1064DAGISEL-NEXT: s_mul_i32 s3, -1, 0x7b
+; GFX1064DAGISEL-NEXT: s_sub_i32 s3, 0, 0x7b
; GFX1064DAGISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s2
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -274,7 +274,7 @@ define amdgpu_kernel void @const_value(ptr addrspace(1) %out) {
; GFX1064GISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
-; GFX1064GISEL-NEXT: s_mul_i32 s3, -1, 0x7b
+; GFX1064GISEL-NEXT: s_sub_i32 s3, 0, 0x7b
; GFX1064GISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -285,7 +285,7 @@ define amdgpu_kernel void @const_value(ptr addrspace(1) %out) {
; GFX1032DAGISEL: ; %bb.0: ; %entry
; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX1032DAGISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1032DAGISEL-NEXT: s_mul_i32 s3, -1, 0x7b
+; GFX1032DAGISEL-NEXT: s_sub_i32 s3, 0, 0x7b
; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX1032DAGISEL-NEXT: s_mul_i32 s2, s3, s2
@@ -298,7 +298,7 @@ define amdgpu_kernel void @const_value(ptr addrspace(1) %out) {
; GFX1032GISEL: ; %bb.0: ; %entry
; GFX1032GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX1032GISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1032GISEL-NEXT: s_mul_i32 s3, -1, 0x7b
+; GFX1032GISEL-NEXT: s_sub_i32 s3, 0, 0x7b
; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX1032GISEL-NEXT: s_mul_i32 s2, s3, s2
@@ -313,7 +313,7 @@ define amdgpu_kernel void @const_value(ptr addrspace(1) %out) {
; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
-; GFX1164DAGISEL-NEXT: s_mul_i32 s3, -1, 0x7b
+; GFX1164DAGISEL-NEXT: s_sub_i32 s3, 0, 0x7b
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164DAGISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s2
@@ -327,7 +327,7 @@ define amdgpu_kernel void @const_value(ptr addrspace(1) %out) {
; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
-; GFX1164GISEL-NEXT: s_mul_i32 s3, -1, 0x7b
+; GFX1164GISEL-NEXT: s_sub_i32 s3, 0, 0x7b
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164GISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
@@ -339,7 +339,7 @@ define amdgpu_kernel void @const_value(ptr addrspace(1) %out) {
; GFX1132DAGISEL: ; %bb.0: ; %entry
; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT: s_mul_i32 s3, -1, 0x7b
+; GFX1132DAGISEL-NEXT: s_sub_i32 s3, 0, 0x7b
; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1132DAGISEL-NEXT: s_mul_i32 s2, s3, s2
@@ -352,7 +352,7 @@ define amdgpu_kernel void @const_value(ptr addrspace(1) %out) {
; GFX1132GISEL: ; %bb.0: ; %entry
; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT: s_mul_i32 s3, -1, 0x7b
+; GFX1132GISEL-NEXT: s_sub_i32 s3, 0, 0x7b
; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
; GFX1132GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX1132GISEL-NEXT: s_mul_i32 s2, s3, s2
@@ -374,7 +374,7 @@ define amdgpu_kernel void @poison_value(ptr addrspace(1) %out, i32 %in) {
; GFX8DAGISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8DAGISEL-NEXT: s_mul_i32 s4, s0, -1
+; GFX8DAGISEL-NEXT: s_sub_i32 s4, 0, s0
; GFX8DAGISEL-NEXT: s_mul_i32 s2, s4, s2
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
@@ -388,7 +388,7 @@ define amdgpu_kernel void @poison_value(ptr addrspace(1) %out, i32 %in) {
; GFX8GISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8GISEL-NEXT: s_mul_i32 s3, s0, -1
+; GFX8GISEL-NEXT: s_sub_i32 s3, 0, s0
; GFX8GISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s2
@@ -403,7 +403,7 @@ define amdgpu_kernel void @poison_value(ptr addrspace(1) %out, i32 %in) {
; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9DAGISEL-NEXT: s_mul_i32 s3, s0, -1
+; GFX9DAGISEL-NEXT: s_sub_i32 s3, 0, s0
; GFX9DAGISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s2
; GFX9DAGISEL-NEXT: global_store_dword v0, v1, s[0:1]
@@ -416,7 +416,7 @@ define amdgpu_kernel void @poison_value(ptr addrspace(1) %out, i32 %in) {
; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX9GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9GISEL-NEXT: s_mul_i32 s3, s0, -1
+; GFX9GISEL-NEXT: s_sub_i32 s3, 0, s0
; GFX9GISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX9GISEL-NEXT: global_store_dword v1, v0, s[0:1]
@@ -429,7 +429,7 @@ define amdgpu_kernel void @poison_value(ptr addrspace(1) %out, i32 %in) {
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s0, -1
+; GFX1064DAGISEL-NEXT: s_sub_i32 s3, 0, s0
; GFX1064DAGISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s2
; GFX1064DAGISEL-NEXT: global_store_dword v0, v1, s[0:1]
@@ -442,7 +442,7 @@ define amdgpu_kernel void @poison_value(ptr addrspace(1) %out, i32 %in) {
; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064GISEL-NEXT: s_mul_i32 s3, s0, -1
+; GFX1064GISEL-NEXT: s_sub_i32 s3, 0, s0
; GFX1064GISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1064GISEL-NEXT: global_store_dword v1, v0, s[0:1]
@@ -455,7 +455,7 @@ define amdgpu_kernel void @poison_value(ptr addrspace(1) %out, i32 %in) {
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032DAGISEL-NEXT: s_mul_i32 s3, s0, -1
+; GFX1032DAGISEL-NEXT: s_sub_i32 s3, 0, s0
; GFX1032DAGISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s2
; GFX1032DAGISEL-NEXT: global_store_dword v0, v1, s[0:1]
@@ -468,7 +468,7 @@ define amdgpu_kernel void @poison_value(ptr addrspace(1) %out, i32 %in) {
; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032GISEL-NEXT: s_mul_i32 s3, s0, -1
+; GFX1032GISEL-NEXT: s_sub_i32 s3, 0, s0
; GFX1032GISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1032GISEL-NEXT: global_store_dword v1, v0, s[0:1]
@@ -481,7 +481,7 @@ define amdgpu_kernel void @poison_value(ptr addrspace(1) %out, i32 %in) {
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s0, -1
+; GFX1164DAGISEL-NEXT: s_sub_i32 s3, 0, s0
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164DAGISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s2
@@ -495,7 +495,7 @@ define amdgpu_kernel void @poison_value(ptr addrspace(1) %out, i32 %in) {
; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164GISEL-NEXT: s_mul_i32 s3, s0, -1
+; GFX1164GISEL-NEXT: s_sub_i32 s3, 0, s0
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164GISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
@@ -509,7 +509,7 @@ define amdgpu_kernel void @poison_value(ptr addrspace(1) %out, i32 %in) {
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132DAGISEL-NEXT: s_mul_i32 s3, s0, -1
+; GFX1132DAGISEL-NEXT: s_sub_i32 s3, 0, s0
; GFX1132DAGISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
@@ -523,7 +523,7 @@ define amdgpu_kernel void @poison_value(ptr addrspace(1) %out, i32 %in) {
; GFX1132GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132GISEL-NEXT: s_mul_i32 s3, s0, -1
+; GFX1132GISEL-NEXT: s_sub_i32 s3, 0, s0
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1132GISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX1132GISEL-NEXT: v_mov_b32_e32 v0, s2
@@ -792,7 +792,7 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX8DAGISEL-NEXT: ; implicit-def: $vgpr0
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8DAGISEL-NEXT: s_mul_i32 s3, s6, -1
+; GFX8DAGISEL-NEXT: s_sub_i32 s3, 0, s6
; GFX8DAGISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX8DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
@@ -833,7 +833,7 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX8GISEL-NEXT: ; implicit-def: $vgpr0
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8GISEL-NEXT: s_mul_i32 s3, s6, -1
+; GFX8GISEL-NEXT: s_sub_i32 s3, 0, s6
; GFX8GISEL-NEXT: s_mul_i32 s6, s3, s2
; GFX8GISEL-NEXT: .LBB4_2: ; %Flow
; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
@@ -871,7 +871,7 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX9DAGISEL-NEXT: ; implicit-def: $vgpr0
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9DAGISEL-NEXT: s_mul_i32 s3, s6, -1
+; GFX9DAGISEL-NEXT: s_sub_i32 s3, 0, s6
; GFX9DAGISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX9DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
@@ -911,7 +911,7 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX9GISEL-NEXT: ; implicit-def: $vgpr0
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9GISEL-NEXT: s_mul_i32 s3, s6, -1
+; GFX9GISEL-NEXT: s_sub_i32 s3, 0, s6
; GFX9GISEL-NEXT: s_mul_i32 s6, s3, s2
; GFX9GISEL-NEXT: .LBB4_2: ; %Flow
; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
@@ -948,7 +948,7 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1064DAGISEL-NEXT: ; implicit-def: $vgpr0
; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s6, -1
+; GFX1064DAGISEL-NEXT: s_sub_i32 s3, 0, s6
; GFX1064DAGISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX1064DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
@@ -988,7 +988,7 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1064GISEL-NEXT: ; implicit-def: $vgpr0
; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064GISEL-NEXT: s_mul_i32 s3, s6, -1
+; GFX1064GISEL-NEXT: s_sub_i32 s3, 0, s6
; GFX1064GISEL-NEXT: s_mul_i32 s6, s3, s2
; GFX1064GISEL-NEXT: .LBB4_2: ; %Flow
; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
@@ -1025,7 +1025,7 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1032DAGISEL-NEXT: ; implicit-def: $vgpr0
; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032DAGISEL-NEXT: s_mul_i32 s1, s1, -1
+; GFX1032DAGISEL-NEXT: s_sub_i32 s1, 0, s1
; GFX1032DAGISEL-NEXT: s_mul_i32 s1, s1, s2
; GFX1032DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s0, s0
@@ -1065,7 +1065,7 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1032GISEL-NEXT: ; implicit-def: $vgpr0
; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032GISEL-NEXT: s_mul_i32 s0, s0, -1
+; GFX1032GISEL-NEXT: s_sub_i32 s0, 0, s0
; GFX1032GISEL-NEXT: s_mul_i32 s0, s0, s2
; GFX1032GISEL-NEXT: .LBB4_2: ; %Flow
; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s1, s1
@@ -1105,7 +1105,7 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s6, -1
+; GFX1164DAGISEL-NEXT: s_sub_i32 s3, 0, s6
; GFX1164DAGISEL-NEXT: s_mul_i32 s2, s3, s2
; GFX1164DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
@@ -1149,7 +1149,7 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164GISEL-NEXT: s_mul_i32 s3, s6, -1
+; GFX1164GISEL-NEXT: s_sub_i32 s3, 0, s6
; GFX1164GISEL-NEXT: s_mul_i32 s6, s3, s2
; GFX1164GISEL-NEXT: .LBB4_2: ; %Flow
; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[0:1], s[0:1]
@@ -1190,7 +1190,7 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132DAGISEL-NEXT: s_mul_i32 s1, s1, -1
+; GFX1132DAGISEL-NEXT: s_sub_i32 s1, 0, s1
; GFX1132DAGISEL-NEXT: s_mul_i32 s1, s1, s2
; GFX1132DAGISEL-NEXT: .LBB4_2: ; %Flow
; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s0, s0
@@ -1234,7 +1234,7 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132GISEL-NEXT: s_mul_i32 s0, s0, -1
+; GFX1132GISEL-NEXT: s_sub_i32 s0, 0, s0
; GFX1132GISEL-NEXT: s_mul_i32 s0, s0, s2
; GFX1132GISEL-NEXT: .LBB4_2: ; %Flow
; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s1, s1
@@ -1275,6 +1275,1621 @@ endif:
store i32 %combine, ptr addrspace(1) %out
ret void
}
+
+define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: uniform_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX8DAGISEL-NEXT: s_sub_i32 s4, 0, s4
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: s_ashr_i32 s0, s4, 31
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: s_mul_i32 s1, s2, s0
+; GFX8DAGISEL-NEXT: s_mul_i32 s0, s2, s4
+; GFX8DAGISEL-NEXT: s_mul_hi_u32 s2, s2, s4
+; GFX8DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX8DAGISEL-NEXT: s_add_u32 s2, s2, s3
+; GFX8DAGISEL-NEXT: s_add_u32 s1, s2, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: uniform_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX8GISEL-NEXT: s_sub_i32 s5, 0, s4
+; GFX8GISEL-NEXT: s_ashr_i32 s4, s5, 31
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mul_i32 s6, s2, s4
+; GFX8GISEL-NEXT: s_mul_i32 s4, s2, s5
+; GFX8GISEL-NEXT: s_mul_hi_u32 s2, s2, s5
+; GFX8GISEL-NEXT: s_mul_i32 s3, s3, s5
+; GFX8GISEL-NEXT: s_add_u32 s2, s2, s3
+; GFX8GISEL-NEXT: s_add_u32 s5, s2, s6
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: uniform_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9DAGISEL-NEXT: s_sub_i32 s5, 0, s4
+; GFX9DAGISEL-NEXT: s_ashr_i32 s4, s5, 31
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_mul_i32 s6, s2, s4
+; GFX9DAGISEL-NEXT: s_mul_i32 s4, s2, s5
+; GFX9DAGISEL-NEXT: s_mul_hi_u32 s2, s2, s5
+; GFX9DAGISEL-NEXT: s_mul_i32 s3, s3, s5
+; GFX9DAGISEL-NEXT: s_add_u32 s2, s2, s3
+; GFX9DAGISEL-NEXT: s_add_u32 s5, s2, s6
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: uniform_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9GISEL-NEXT: s_sub_i32 s5, 0, s4
+; GFX9GISEL-NEXT: s_ashr_i32 s4, s5, 31
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mul_i32 s6, s2, s4
+; GFX9GISEL-NEXT: s_mul_i32 s4, s2, s5
+; GFX9GISEL-NEXT: s_mul_hi_u32 s2, s2, s5
+; GFX9GISEL-NEXT: s_mul_i32 s3, s3, s5
+; GFX9GISEL-NEXT: s_add_u32 s2, s2, s3
+; GFX9GISEL-NEXT: s_add_u32 s5, s2, s6
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX1064DAGISEL-LABEL: uniform_value_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064DAGISEL-NEXT: s_sub_i32 s4, 0, s4
+; GFX1064DAGISEL-NEXT: s_ashr_i32 s5, s4, 31
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_mul_hi_u32 s6, s2, s4
+; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1064DAGISEL-NEXT: s_mul_i32 s5, s2, s5
+; GFX1064DAGISEL-NEXT: s_add_u32 s3, s6, s3
+; GFX1064DAGISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1064DAGISEL-NEXT: s_add_u32 s3, s3, s5
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT: s_endpgm
+;
+; GFX1064GISEL-LABEL: uniform_value_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064GISEL-NEXT: s_sub_i32 s4, 0, s4
+; GFX1064GISEL-NEXT: s_ashr_i32 s5, s4, 31
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mul_hi_u32 s6, s2, s4
+; GFX1064GISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1064GISEL-NEXT: s_mul_i32 s5, s2, s5
+; GFX1064GISEL-NEXT: s_add_u32 s3, s6, s3
+; GFX1064GISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1064GISEL-NEXT: s_add_u32 s3, s3, s5
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT: s_endpgm
+;
+; GFX1032DAGISEL-LABEL: uniform_value_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT: s_mov_b32 s4, exec_lo
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s4, s4
+; GFX1032DAGISEL-NEXT: s_sub_i32 s4, 0, s4
+; GFX1032DAGISEL-NEXT: s_ashr_i32 s5, s4, 31
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_mul_hi_u32 s6, s2, s4
+; GFX1032DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1032DAGISEL-NEXT: s_mul_i32 s5, s2, s5
+; GFX1032DAGISEL-NEXT: s_add_u32 s3, s6, s3
+; GFX1032DAGISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1032DAGISEL-NEXT: s_add_u32 s3, s3, s5
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT: s_endpgm
+;
+; GFX1032GISEL-LABEL: uniform_value_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT: s_mov_b32 s4, exec_lo
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s4, s4
+; GFX1032GISEL-NEXT: s_sub_i32 s4, 0, s4
+; GFX1032GISEL-NEXT: s_ashr_i32 s5, s4, 31
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mul_hi_u32 s6, s2, s4
+; GFX1032GISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1032GISEL-NEXT: s_mul_i32 s5, s2, s5
+; GFX1032GISEL-NEXT: s_add_u32 s3, s6, s3
+; GFX1032GISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1032GISEL-NEXT: s_add_u32 s3, s3, s5
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: uniform_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: s_sub_i32 s4, 0, s4
+; GFX1164DAGISEL-NEXT: s_ashr_i32 s5, s4, 31
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_mul_hi_u32 s6, s2, s4
+; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1164DAGISEL-NEXT: s_mul_i32 s5, s2, s5
+; GFX1164DAGISEL-NEXT: s_add_u32 s3, s6, s3
+; GFX1164DAGISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1164DAGISEL-NEXT: s_add_u32 s3, s3, s5
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: uniform_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT: s_sub_i32 s4, 0, s4
+; GFX1164GISEL-NEXT: s_ashr_i32 s5, s4, 31
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mul_hi_u32 s6, s2, s4
+; GFX1164GISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1164GISEL-NEXT: s_mul_i32 s5, s2, s5
+; GFX1164GISEL-NEXT: s_add_u32 s3, s6, s3
+; GFX1164GISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1164GISEL-NEXT: s_add_u32 s3, s3, s5
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: uniform_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: s_mov_b32 s4, exec_lo
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s4, s4
+; GFX1132DAGISEL-NEXT: s_sub_i32 s4, 0, s4
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_ashr_i32 s5, s4, 31
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_mul_hi_u32 s6, s2, s4
+; GFX1132DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1132DAGISEL-NEXT: s_mul_i32 s5, s2, s5
+; GFX1132DAGISEL-NEXT: s_add_u32 s3, s6, s3
+; GFX1132DAGISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1132DAGISEL-NEXT: s_add_u32 s3, s3, s5
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: uniform_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: s_mov_b32 s4, exec_lo
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s4, s4
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: s_sub_i32 s4, 0, s4
+; GFX1132GISEL-NEXT: s_ashr_i32 s5, s4, 31
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mul_hi_u32 s6, s2, s4
+; GFX1132GISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1132GISEL-NEXT: s_mul_i32 s5, s2, s5
+; GFX1132GISEL-NEXT: s_add_u32 s3, s6, s3
+; GFX1132GISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1132GISEL-NEXT: s_add_u32 s3, s3, s5
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 %in, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
+; GFX8DAGISEL-LABEL: const_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX8DAGISEL-NEXT: s_sub_i32 s3, 0, s2
+; GFX8DAGISEL-NEXT: s_ashr_i32 s2, s3, 31
+; GFX8DAGISEL-NEXT: s_mul_i32 s4, s2, 0x7b
+; GFX8DAGISEL-NEXT: s_mul_i32 s2, s3, 0x7b
+; GFX8DAGISEL-NEXT: s_mul_hi_u32 s5, 0x7b, s3
+; GFX8DAGISEL-NEXT: s_mul_i32 s3, s3, 0
+; GFX8DAGISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX8DAGISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: const_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX8GISEL-NEXT: s_sub_i32 s3, 0, s2
+; GFX8GISEL-NEXT: s_ashr_i32 s2, s3, 31
+; GFX8GISEL-NEXT: s_mul_i32 s4, s2, 0x7b
+; GFX8GISEL-NEXT: s_mul_i32 s2, s3, 0x7b
+; GFX8GISEL-NEXT: s_mul_hi_u32 s5, 0x7b, s3
+; GFX8GISEL-NEXT: s_mul_i32 s3, s3, 0
+; GFX8GISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX8GISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: const_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9DAGISEL-NEXT: s_sub_i32 s3, 0, s2
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: s_ashr_i32 s2, s3, 31
+; GFX9DAGISEL-NEXT: s_mul_i32 s4, s2, 0x7b
+; GFX9DAGISEL-NEXT: s_mul_i32 s2, s3, 0x7b
+; GFX9DAGISEL-NEXT: s_mul_hi_u32 s5, 0x7b, s3
+; GFX9DAGISEL-NEXT: s_mul_i32 s3, s3, 0
+; GFX9DAGISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX9DAGISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: const_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9GISEL-NEXT: s_sub_i32 s3, 0, s2
+; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT: s_ashr_i32 s2, s3, 31
+; GFX9GISEL-NEXT: s_mul_i32 s4, s2, 0x7b
+; GFX9GISEL-NEXT: s_mul_i32 s2, s3, 0x7b
+; GFX9GISEL-NEXT: s_mul_hi_u32 s5, 0x7b, s3
+; GFX9GISEL-NEXT: s_mul_i32 s3, s3, 0
+; GFX9GISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX9GISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX1064DAGISEL-LABEL: const_value_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064DAGISEL-NEXT: s_sub_i32 s2, 0, s2
+; GFX1064DAGISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GFX1064DAGISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
+; GFX1064DAGISEL-NEXT: s_mul_i32 s5, s2, 0
+; GFX1064DAGISEL-NEXT: s_mulk_i32 s3, 0x7b
+; GFX1064DAGISEL-NEXT: s_add_u32 s4, s4, s5
+; GFX1064DAGISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX1064DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT: s_endpgm
+;
+; GFX1064GISEL-LABEL: const_value_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1064GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064GISEL-NEXT: s_sub_i32 s2, 0, s2
+; GFX1064GISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GFX1064GISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
+; GFX1064GISEL-NEXT: s_mul_i32 s5, s2, 0
+; GFX1064GISEL-NEXT: s_mulk_i32 s3, 0x7b
+; GFX1064GISEL-NEXT: s_add_u32 s4, s4, s5
+; GFX1064GISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX1064GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT: s_endpgm
+;
+; GFX1032DAGISEL-LABEL: const_value_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX1032DAGISEL-NEXT: s_sub_i32 s2, 0, s2
+; GFX1032DAGISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GFX1032DAGISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
+; GFX1032DAGISEL-NEXT: s_mul_i32 s5, s2, 0
+; GFX1032DAGISEL-NEXT: s_mulk_i32 s3, 0x7b
+; GFX1032DAGISEL-NEXT: s_add_u32 s4, s4, s5
+; GFX1032DAGISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX1032DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT: s_endpgm
+;
+; GFX1032GISEL-LABEL: const_value_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1032GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX1032GISEL-NEXT: s_sub_i32 s2, 0, s2
+; GFX1032GISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GFX1032GISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
+; GFX1032GISEL-NEXT: s_mul_i32 s5, s2, 0
+; GFX1032GISEL-NEXT: s_mulk_i32 s3, 0x7b
+; GFX1032GISEL-NEXT: s_add_u32 s4, s4, s5
+; GFX1032GISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX1032GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: const_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: s_sub_i32 s2, 0, s2
+; GFX1164DAGISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GFX1164DAGISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
+; GFX1164DAGISEL-NEXT: s_mul_i32 s5, s2, 0
+; GFX1164DAGISEL-NEXT: s_mulk_i32 s3, 0x7b
+; GFX1164DAGISEL-NEXT: s_add_u32 s4, s4, s5
+; GFX1164DAGISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX1164DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: const_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT: s_sub_i32 s2, 0, s2
+; GFX1164GISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GFX1164GISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
+; GFX1164GISEL-NEXT: s_mul_i32 s5, s2, 0
+; GFX1164GISEL-NEXT: s_mulk_i32 s3, 0x7b
+; GFX1164GISEL-NEXT: s_add_u32 s4, s4, s5
+; GFX1164GISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX1164GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: const_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX1132DAGISEL-NEXT: s_sub_i32 s2, 0, s2
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GFX1132DAGISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
+; GFX1132DAGISEL-NEXT: s_mul_i32 s5, s2, 0
+; GFX1132DAGISEL-NEXT: s_mulk_i32 s3, 0x7b
+; GFX1132DAGISEL-NEXT: s_add_u32 s4, s4, s5
+; GFX1132DAGISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX1132DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: const_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: s_sub_i32 s2, 0, s2
+; GFX1132GISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GFX1132GISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
+; GFX1132GISEL-NEXT: s_mul_i32 s5, s2, 0
+; GFX1132GISEL-NEXT: s_mulk_i32 s3, 0x7b
+; GFX1132GISEL-NEXT: s_add_u32 s4, s4, s5
+; GFX1132GISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX1132GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 123, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: poison_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX8DAGISEL-NEXT: s_sub_i32 s3, 0, s2
+; GFX8DAGISEL-NEXT: s_ashr_i32 s2, s3, 31
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_mul_i32 s4, s0, s2
+; GFX8DAGISEL-NEXT: s_mul_i32 s2, s0, s3
+; GFX8DAGISEL-NEXT: s_mul_hi_u32 s5, s0, s3
+; GFX8DAGISEL-NEXT: s_mul_i32 s3, s1, s3
+; GFX8DAGISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX8DAGISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: poison_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX8GISEL-NEXT: s_sub_i32 s3, 0, s2
+; GFX8GISEL-NEXT: s_ashr_i32 s2, s3, 31
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mul_i32 s4, s0, s2
+; GFX8GISEL-NEXT: s_mul_i32 s2, s0, s3
+; GFX8GISEL-NEXT: s_mul_hi_u32 s5, s0, s3
+; GFX8GISEL-NEXT: s_mul_i32 s3, s1, s3
+; GFX8GISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX8GISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: poison_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9DAGISEL-NEXT: s_sub_i32 s3, 0, s2
+; GFX9DAGISEL-NEXT: s_ashr_i32 s2, s3, 31
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_mul_i32 s4, s0, s2
+; GFX9DAGISEL-NEXT: s_mul_i32 s2, s0, s3
+; GFX9DAGISEL-NEXT: s_mul_hi_u32 s5, s0, s3
+; GFX9DAGISEL-NEXT: s_mul_i32 s3, s1, s3
+; GFX9DAGISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX9DAGISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: poison_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9GISEL-NEXT: s_sub_i32 s3, 0, s2
+; GFX9GISEL-NEXT: s_ashr_i32 s2, s3, 31
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mul_i32 s4, s0, s2
+; GFX9GISEL-NEXT: s_mul_i32 s2, s0, s3
+; GFX9GISEL-NEXT: s_mul_hi_u32 s5, s0, s3
+; GFX9GISEL-NEXT: s_mul_i32 s3, s1, s3
+; GFX9GISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX9GISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX1064DAGISEL-LABEL: poison_value_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064DAGISEL-NEXT: s_sub_i32 s2, 0, s2
+; GFX1064DAGISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_mul_hi_u32 s4, s0, s2
+; GFX1064DAGISEL-NEXT: s_mul_i32 s5, s1, s2
+; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s0, s3
+; GFX1064DAGISEL-NEXT: s_add_u32 s4, s4, s5
+; GFX1064DAGISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX1064DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT: s_endpgm
+;
+; GFX1064GISEL-LABEL: poison_value_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1064GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064GISEL-NEXT: s_sub_i32 s2, 0, s2
+; GFX1064GISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mul_hi_u32 s4, s0, s2
+; GFX1064GISEL-NEXT: s_mul_i32 s5, s1, s2
+; GFX1064GISEL-NEXT: s_mul_i32 s3, s0, s3
+; GFX1064GISEL-NEXT: s_add_u32 s4, s4, s5
+; GFX1064GISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX1064GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT: s_endpgm
+;
+; GFX1032DAGISEL-LABEL: poison_value_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX1032DAGISEL-NEXT: s_sub_i32 s2, 0, s2
+; GFX1032DAGISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_mul_hi_u32 s4, s0, s2
+; GFX1032DAGISEL-NEXT: s_mul_i32 s5, s1, s2
+; GFX1032DAGISEL-NEXT: s_mul_i32 s3, s0, s3
+; GFX1032DAGISEL-NEXT: s_add_u32 s4, s4, s5
+; GFX1032DAGISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX1032DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT: s_endpgm
+;
+; GFX1032GISEL-LABEL: poison_value_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1032GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX1032GISEL-NEXT: s_sub_i32 s2, 0, s2
+; GFX1032GISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mul_hi_u32 s4, s0, s2
+; GFX1032GISEL-NEXT: s_mul_i32 s5, s1, s2
+; GFX1032GISEL-NEXT: s_mul_i32 s3, s0, s3
+; GFX1032GISEL-NEXT: s_add_u32 s4, s4, s5
+; GFX1032GISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX1032GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: poison_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: s_sub_i32 s2, 0, s2
+; GFX1164DAGISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_mul_hi_u32 s4, s0, s2
+; GFX1164DAGISEL-NEXT: s_mul_i32 s5, s1, s2
+; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s0, s3
+; GFX1164DAGISEL-NEXT: s_add_u32 s4, s4, s5
+; GFX1164DAGISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX1164DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: poison_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT: s_sub_i32 s2, 0, s2
+; GFX1164GISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mul_hi_u32 s4, s0, s2
+; GFX1164GISEL-NEXT: s_mul_i32 s5, s1, s2
+; GFX1164GISEL-NEXT: s_mul_i32 s3, s0, s3
+; GFX1164GISEL-NEXT: s_add_u32 s4, s4, s5
+; GFX1164GISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX1164GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: poison_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX1132DAGISEL-NEXT: s_sub_i32 s2, 0, s2
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_mul_hi_u32 s4, s0, s2
+; GFX1132DAGISEL-NEXT: s_mul_i32 s5, s1, s2
+; GFX1132DAGISEL-NEXT: s_mul_i32 s3, s0, s3
+; GFX1132DAGISEL-NEXT: s_add_u32 s4, s4, s5
+; GFX1132DAGISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX1132DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: poison_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: s_sub_i32 s2, 0, s2
+; GFX1132GISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mul_hi_u32 s4, s0, s2
+; GFX1132GISEL-NEXT: s_mul_i32 s5, s1, s2
+; GFX1132GISEL-NEXT: s_mul_i32 s3, s0, s3
+; GFX1132GISEL-NEXT: s_add_u32 s4, s4, s5
+; GFX1132GISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX1132GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 poison, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
+; GFX8DAGISEL-LABEL: divergent_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s8, s[6:7]
+; GFX8DAGISEL-NEXT: v_readlane_b32 s9, v2, s8
+; GFX8DAGISEL-NEXT: v_readlane_b32 s10, v3, s8
+; GFX8DAGISEL-NEXT: s_sub_u32 s4, s4, s9
+; GFX8DAGISEL-NEXT: s_bitset0_b64 s[6:7], s8
+; GFX8DAGISEL-NEXT: s_subb_u32 s5, s5, s10
+; GFX8DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX8DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX8DAGISEL-NEXT: ; %bb.2:
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8GISEL-LABEL: divergent_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b32 s4, 0
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: s_mov_b32 s5, s4
+; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8GISEL-NEXT: s_ff1_i32_b64 s8, s[6:7]
+; GFX8GISEL-NEXT: v_readlane_b32 s9, v2, s8
+; GFX8GISEL-NEXT: v_readlane_b32 s10, v3, s8
+; GFX8GISEL-NEXT: s_sub_u32 s4, s4, s9
+; GFX8GISEL-NEXT: s_bitset0_b64 s[6:7], s8
+; GFX8GISEL-NEXT: s_subb_u32 s5, s5, s10
+; GFX8GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX8GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX8GISEL-NEXT: ; %bb.2:
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9DAGISEL-LABEL: divergent_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s8, s[6:7]
+; GFX9DAGISEL-NEXT: v_readlane_b32 s9, v2, s8
+; GFX9DAGISEL-NEXT: v_readlane_b32 s10, v3, s8
+; GFX9DAGISEL-NEXT: s_sub_u32 s4, s4, s9
+; GFX9DAGISEL-NEXT: s_bitset0_b64 s[6:7], s8
+; GFX9DAGISEL-NEXT: s_subb_u32 s5, s5, s10
+; GFX9DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX9DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX9DAGISEL-NEXT: ; %bb.2:
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9GISEL-LABEL: divergent_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b32 s4, 0
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT: s_mov_b32 s5, s4
+; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9GISEL-NEXT: s_ff1_i32_b64 s8, s[6:7]
+; GFX9GISEL-NEXT: v_readlane_b32 s9, v2, s8
+; GFX9GISEL-NEXT: v_readlane_b32 s10, v3, s8
+; GFX9GISEL-NEXT: s_sub_u32 s4, s4, s9
+; GFX9GISEL-NEXT: s_bitset0_b64 s[6:7], s8
+; GFX9GISEL-NEXT: s_subb_u32 s5, s5, s10
+; GFX9GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX9GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX9GISEL-NEXT: ; %bb.2:
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX9GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1064DAGISEL-LABEL: divergent_value_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s8, s[6:7]
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s9, v2, s8
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s10, v3, s8
+; GFX1064DAGISEL-NEXT: s_bitset0_b64 s[6:7], s8
+; GFX1064DAGISEL-NEXT: s_sub_u32 s4, s4, s9
+; GFX1064DAGISEL-NEXT: s_subb_u32 s5, s5, s10
+; GFX1064DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX1064DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1064DAGISEL-NEXT: ; %bb.2:
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1064GISEL-LABEL: divergent_value_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b32 s4, 0
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT: s_mov_b32 s5, s4
+; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064GISEL-NEXT: s_ff1_i32_b64 s8, s[6:7]
+; GFX1064GISEL-NEXT: v_readlane_b32 s9, v2, s8
+; GFX1064GISEL-NEXT: v_readlane_b32 s10, v3, s8
+; GFX1064GISEL-NEXT: s_bitset0_b64 s[6:7], s8
+; GFX1064GISEL-NEXT: s_sub_u32 s4, s4, s9
+; GFX1064GISEL-NEXT: s_subb_u32 s5, s5, s10
+; GFX1064GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX1064GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1064GISEL-NEXT: ; %bb.2:
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1064GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032DAGISEL-LABEL: divergent_value_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s8, v2, s7
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s9, v3, s7
+; GFX1032DAGISEL-NEXT: s_bitset0_b32 s6, s7
+; GFX1032DAGISEL-NEXT: s_sub_u32 s4, s4, s8
+; GFX1032DAGISEL-NEXT: s_subb_u32 s5, s5, s9
+; GFX1032DAGISEL-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1032DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1032DAGISEL-NEXT: ; %bb.2:
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032GISEL-LABEL: divergent_value_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b32 s4, 0
+; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT: s_mov_b32 s5, s4
+; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
+; GFX1032GISEL-NEXT: v_readlane_b32 s8, v2, s7
+; GFX1032GISEL-NEXT: v_readlane_b32 s9, v3, s7
+; GFX1032GISEL-NEXT: s_bitset0_b32 s6, s7
+; GFX1032GISEL-NEXT: s_sub_u32 s4, s4, s8
+; GFX1032GISEL-NEXT: s_subb_u32 s5, s5, s9
+; GFX1032GISEL-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1032GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1032GISEL-NEXT: ; %bb.2:
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1164DAGISEL-LABEL: divergent_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_mov_b32 s0, 0
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT: s_mov_b32 s1, s0
+; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s4, s[2:3]
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v2, s4
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s6, v3, s4
+; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s4
+; GFX1164DAGISEL-NEXT: s_sub_u32 s0, s0, s5
+; GFX1164DAGISEL-NEXT: s_subb_u32 s1, s1, s6
+; GFX1164DAGISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1164DAGISEL-NEXT: ; %bb.2:
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX1164DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1164GISEL-LABEL: divergent_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b32 s0, 0
+; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT: s_mov_b32 s1, s0
+; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT: s_ctz_i32_b64 s4, s[2:3]
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT: v_readlane_b32 s5, v2, s4
+; GFX1164GISEL-NEXT: v_readlane_b32 s6, v3, s4
+; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s4
+; GFX1164GISEL-NEXT: s_sub_u32 s0, s0, s5
+; GFX1164GISEL-NEXT: s_subb_u32 s1, s1, s6
+; GFX1164GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1164GISEL-NEXT: ; %bb.2:
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX1164GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1132DAGISEL-LABEL: divergent_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_mov_b32 s0, 0
+; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT: s_mov_b32 s1, s0
+; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX1132DAGISEL-NEXT: s_sub_u32 s0, s0, s4
+; GFX1132DAGISEL-NEXT: s_subb_u32 s1, s1, s5
+; GFX1132DAGISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1132DAGISEL-NEXT: ; %bb.2:
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1132GISEL-LABEL: divergent_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b32 s0, 0
+; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT: s_mov_b32 s1, s0
+; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX1132GISEL-NEXT: s_sub_u32 s0, s0, s4
+; GFX1132GISEL-NEXT: s_subb_u32 s1, s1, s5
+; GFX1132GISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1132GISEL-NEXT: ; %bb.2:
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132GISEL-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 %id.x, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64 %in2) {
+; GFX8DAGISEL-LABEL: divergent_cfg_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX8DAGISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX8DAGISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX8DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8DAGISEL-NEXT: s_sub_i32 s7, 0, s6
+; GFX8DAGISEL-NEXT: s_ashr_i32 s6, s7, 31
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_mul_i32 s10, s2, s6
+; GFX8DAGISEL-NEXT: s_mul_i32 s6, s2, s7
+; GFX8DAGISEL-NEXT: s_mul_hi_u32 s2, s2, s7
+; GFX8DAGISEL-NEXT: s_mul_i32 s3, s3, s7
+; GFX8DAGISEL-NEXT: s_add_u32 s2, s2, s3
+; GFX8DAGISEL-NEXT: s_add_u32 s7, s2, s10
+; GFX8DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[8:9]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX8DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
+; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX8DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8DAGISEL-NEXT: s_sub_i32 s7, 0, s6
+; GFX8DAGISEL-NEXT: s_ashr_i32 s6, s7, 31
+; GFX8DAGISEL-NEXT: s_mul_i32 s8, s4, s6
+; GFX8DAGISEL-NEXT: s_mul_i32 s6, s4, s7
+; GFX8DAGISEL-NEXT: s_mul_hi_u32 s4, s4, s7
+; GFX8DAGISEL-NEXT: s_mul_i32 s5, s5, s7
+; GFX8DAGISEL-NEXT: s_add_u32 s4, s4, s5
+; GFX8DAGISEL-NEXT: s_add_u32 s7, s4, s8
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX8DAGISEL-NEXT: .LBB9_4: ; %endif
+; GFX8DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: divergent_cfg_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX8GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX8GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX8GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX8GISEL-NEXT: ; %bb.1: ; %else
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8GISEL-NEXT: s_sub_i32 s7, 0, s6
+; GFX8GISEL-NEXT: s_ashr_i32 s6, s7, 31
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mul_i32 s10, s2, s6
+; GFX8GISEL-NEXT: s_mul_i32 s6, s2, s7
+; GFX8GISEL-NEXT: s_mul_hi_u32 s2, s2, s7
+; GFX8GISEL-NEXT: s_mul_i32 s3, s3, s7
+; GFX8GISEL-NEXT: s_add_u32 s2, s2, s3
+; GFX8GISEL-NEXT: s_add_u32 s7, s2, s10
+; GFX8GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX8GISEL-NEXT: ; %bb.3: ; %if
+; GFX8GISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8GISEL-NEXT: s_sub_i32 s7, 0, s6
+; GFX8GISEL-NEXT: s_ashr_i32 s6, s7, 31
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mul_i32 s8, s4, s6
+; GFX8GISEL-NEXT: s_mul_i32 s6, s4, s7
+; GFX8GISEL-NEXT: s_mul_hi_u32 s4, s4, s7
+; GFX8GISEL-NEXT: s_mul_i32 s5, s5, s7
+; GFX8GISEL-NEXT: s_add_u32 s4, s4, s5
+; GFX8GISEL-NEXT: s_add_u32 s7, s4, s8
+; GFX8GISEL-NEXT: .LBB9_4: ; %endif
+; GFX8GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: divergent_cfg_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX9DAGISEL-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9DAGISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX9DAGISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX9DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9DAGISEL-NEXT: s_sub_i32 s5, 0, s4
+; GFX9DAGISEL-NEXT: s_ashr_i32 s4, s5, 31
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_mul_i32 s10, s2, s4
+; GFX9DAGISEL-NEXT: s_mul_i32 s4, s2, s5
+; GFX9DAGISEL-NEXT: s_mul_hi_u32 s2, s2, s5
+; GFX9DAGISEL-NEXT: s_mul_i32 s3, s3, s5
+; GFX9DAGISEL-NEXT: s_add_u32 s2, s2, s3
+; GFX9DAGISEL-NEXT: s_add_u32 s5, s2, s10
+; GFX9DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[8:9]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
+; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX9DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9DAGISEL-NEXT: s_sub_i32 s5, 0, s4
+; GFX9DAGISEL-NEXT: s_ashr_i32 s4, s5, 31
+; GFX9DAGISEL-NEXT: s_mul_i32 s8, s6, s4
+; GFX9DAGISEL-NEXT: s_mul_i32 s4, s6, s5
+; GFX9DAGISEL-NEXT: s_mul_hi_u32 s6, s6, s5
+; GFX9DAGISEL-NEXT: s_mul_i32 s5, s7, s5
+; GFX9DAGISEL-NEXT: s_add_u32 s5, s6, s5
+; GFX9DAGISEL-NEXT: s_add_u32 s5, s5, s8
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9DAGISEL-NEXT: .LBB9_4: ; %endif
+; GFX9DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: divergent_cfg_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX9GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX9GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX9GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX9GISEL-NEXT: ; %bb.1: ; %else
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX9GISEL-NEXT: s_sub_i32 s7, 0, s6
+; GFX9GISEL-NEXT: s_ashr_i32 s6, s7, 31
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mul_i32 s10, s2, s6
+; GFX9GISEL-NEXT: s_mul_i32 s6, s2, s7
+; GFX9GISEL-NEXT: s_mul_hi_u32 s2, s2, s7
+; GFX9GISEL-NEXT: s_mul_i32 s3, s3, s7
+; GFX9GISEL-NEXT: s_add_u32 s2, s2, s3
+; GFX9GISEL-NEXT: s_add_u32 s7, s2, s10
+; GFX9GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX9GISEL-NEXT: ; %bb.3: ; %if
+; GFX9GISEL-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
+; GFX9GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9GISEL-NEXT: s_sub_i32 s4, 0, s4
+; GFX9GISEL-NEXT: s_ashr_i32 s5, s4, 31
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mul_i32 s6, s8, s4
+; GFX9GISEL-NEXT: s_mul_hi_u32 s7, s8, s4
+; GFX9GISEL-NEXT: s_mul_i32 s4, s9, s4
+; GFX9GISEL-NEXT: s_mul_i32 s5, s8, s5
+; GFX9GISEL-NEXT: s_add_u32 s4, s7, s4
+; GFX9GISEL-NEXT: s_add_u32 s7, s4, s5
+; GFX9GISEL-NEXT: .LBB9_4: ; %endif
+; GFX9GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX1064DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_clause 0x1
+; GFX1064DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX1064DAGISEL-NEXT: ; implicit-def: $sgpr8_sgpr9
+; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064DAGISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1064DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[8:9], exec
+; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s8, s[8:9]
+; GFX1064DAGISEL-NEXT: s_sub_i32 s8, 0, s8
+; GFX1064DAGISEL-NEXT: s_ashr_i32 s9, s8, 31
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_mul_hi_u32 s10, s2, s8
+; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s3, s8
+; GFX1064DAGISEL-NEXT: s_mul_i32 s9, s2, s9
+; GFX1064DAGISEL-NEXT: s_add_u32 s3, s10, s3
+; GFX1064DAGISEL-NEXT: s_mul_i32 s8, s2, s8
+; GFX1064DAGISEL-NEXT: s_add_u32 s9, s3, s9
+; GFX1064DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[4:5]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s8
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s9
+; GFX1064DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
+; GFX1064DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064DAGISEL-NEXT: s_sub_i32 s4, 0, s4
+; GFX1064DAGISEL-NEXT: s_ashr_i32 s5, s4, 31
+; GFX1064DAGISEL-NEXT: s_mul_hi_u32 s8, s6, s4
+; GFX1064DAGISEL-NEXT: s_mul_i32 s7, s7, s4
+; GFX1064DAGISEL-NEXT: s_mul_i32 s5, s6, s5
+; GFX1064DAGISEL-NEXT: s_add_u32 s7, s8, s7
+; GFX1064DAGISEL-NEXT: s_mul_i32 s4, s6, s4
+; GFX1064DAGISEL-NEXT: s_add_u32 s5, s7, s5
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX1064DAGISEL-NEXT: ; %bb.4: ; %endif
+; GFX1064DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT: s_endpgm
+;
+; GFX1064GISEL-LABEL: divergent_cfg_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX1064GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1064GISEL-NEXT: ; %bb.1: ; %else
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1064GISEL-NEXT: s_sub_i32 s6, 0, s6
+; GFX1064GISEL-NEXT: s_ashr_i32 s7, s6, 31
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mul_hi_u32 s10, s2, s6
+; GFX1064GISEL-NEXT: s_mul_i32 s3, s3, s6
+; GFX1064GISEL-NEXT: s_mul_i32 s7, s2, s7
+; GFX1064GISEL-NEXT: s_add_u32 s3, s10, s3
+; GFX1064GISEL-NEXT: s_mul_i32 s6, s2, s6
+; GFX1064GISEL-NEXT: s_add_u32 s7, s3, s7
+; GFX1064GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1064GISEL-NEXT: ; %bb.3: ; %if
+; GFX1064GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064GISEL-NEXT: s_sub_i32 s4, 0, s4
+; GFX1064GISEL-NEXT: s_ashr_i32 s5, s4, 31
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mul_hi_u32 s8, s6, s4
+; GFX1064GISEL-NEXT: s_mul_i32 s7, s7, s4
+; GFX1064GISEL-NEXT: s_mul_i32 s5, s6, s5
+; GFX1064GISEL-NEXT: s_add_u32 s7, s8, s7
+; GFX1064GISEL-NEXT: s_mul_i32 s6, s6, s4
+; GFX1064GISEL-NEXT: s_add_u32 s7, s7, s5
+; GFX1064GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1064GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT: s_endpgm
+;
+; GFX1032DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_clause 0x1
+; GFX1032DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc_lo, 15, v0
+; GFX1032DAGISEL-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s8, vcc_lo
+; GFX1032DAGISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1032DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX1032DAGISEL-NEXT: s_mov_b32 s4, exec_lo
+; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s4, s4
+; GFX1032DAGISEL-NEXT: s_sub_i32 s4, 0, s4
+; GFX1032DAGISEL-NEXT: s_ashr_i32 s5, s4, 31
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_mul_hi_u32 s9, s2, s4
+; GFX1032DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1032DAGISEL-NEXT: s_mul_i32 s5, s2, s5
+; GFX1032DAGISEL-NEXT: s_add_u32 s3, s9, s3
+; GFX1032DAGISEL-NEXT: s_mul_i32 s4, s2, s4
+; GFX1032DAGISEL-NEXT: s_add_u32 s5, s3, s5
+; GFX1032DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s2, s8
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX1032DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s2
+; GFX1032DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX1032DAGISEL-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1032DAGISEL-NEXT: s_sub_i32 s3, 0, s3
+; GFX1032DAGISEL-NEXT: s_ashr_i32 s4, s3, 31
+; GFX1032DAGISEL-NEXT: s_mul_hi_u32 s5, s6, s3
+; GFX1032DAGISEL-NEXT: s_mul_i32 s7, s7, s3
+; GFX1032DAGISEL-NEXT: s_mul_i32 s8, s6, s4
+; GFX1032DAGISEL-NEXT: s_add_u32 s5, s5, s7
+; GFX1032DAGISEL-NEXT: s_mul_i32 s4, s6, s3
+; GFX1032DAGISEL-NEXT: s_add_u32 s5, s5, s8
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX1032DAGISEL-NEXT: ; %bb.4: ; %endif
+; GFX1032DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT: s_endpgm
+;
+; GFX1032GISEL-LABEL: divergent_cfg_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 16, v0
+; GFX1032GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1032GISEL-NEXT: s_and_saveexec_b32 s8, vcc_lo
+; GFX1032GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1032GISEL-NEXT: ; %bb.1: ; %else
+; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s6, s6
+; GFX1032GISEL-NEXT: s_sub_i32 s6, 0, s6
+; GFX1032GISEL-NEXT: s_ashr_i32 s7, s6, 31
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mul_hi_u32 s9, s2, s6
+; GFX1032GISEL-NEXT: s_mul_i32 s3, s3, s6
+; GFX1032GISEL-NEXT: s_mul_i32 s7, s2, s7
+; GFX1032GISEL-NEXT: s_add_u32 s3, s9, s3
+; GFX1032GISEL-NEXT: s_mul_i32 s6, s2, s6
+; GFX1032GISEL-NEXT: s_add_u32 s7, s3, s7
+; GFX1032GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s2, s8
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1032GISEL-NEXT: ; %bb.3: ; %if
+; GFX1032GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032GISEL-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1032GISEL-NEXT: s_sub_i32 s3, 0, s3
+; GFX1032GISEL-NEXT: s_ashr_i32 s4, s3, 31
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mul_hi_u32 s5, s6, s3
+; GFX1032GISEL-NEXT: s_mul_i32 s7, s7, s3
+; GFX1032GISEL-NEXT: s_mul_i32 s4, s6, s4
+; GFX1032GISEL-NEXT: s_add_u32 s5, s5, s7
+; GFX1032GISEL-NEXT: s_mul_i32 s6, s6, s3
+; GFX1032GISEL-NEXT: s_add_u32 s7, s5, s4
+; GFX1032GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1032GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_clause 0x1
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT: ; implicit-def: $sgpr8_sgpr9
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX1164DAGISEL-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
+; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1164DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[8:9], exec
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s8, s[8:9]
+; GFX1164DAGISEL-NEXT: s_sub_i32 s8, 0, s8
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: s_ashr_i32 s9, s8, 31
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_mul_hi_u32 s10, s2, s8
+; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s3, s8
+; GFX1164DAGISEL-NEXT: s_mul_i32 s9, s2, s9
+; GFX1164DAGISEL-NEXT: s_add_u32 s3, s10, s3
+; GFX1164DAGISEL-NEXT: s_mul_i32 s8, s2, s8
+; GFX1164DAGISEL-NEXT: s_add_u32 s9, s3, s9
+; GFX1164DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[6:7]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s8
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s9
+; GFX1164DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
+; GFX1164DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: s_sub_i32 s6, 0, s6
+; GFX1164DAGISEL-NEXT: s_ashr_i32 s7, s6, 31
+; GFX1164DAGISEL-NEXT: s_mul_hi_u32 s8, s4, s6
+; GFX1164DAGISEL-NEXT: s_mul_i32 s5, s5, s6
+; GFX1164DAGISEL-NEXT: s_mul_i32 s7, s4, s7
+; GFX1164DAGISEL-NEXT: s_add_u32 s5, s8, s5
+; GFX1164DAGISEL-NEXT: s_mul_i32 s4, s4, s6
+; GFX1164DAGISEL-NEXT: s_add_u32 s5, s5, s7
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX1164DAGISEL-NEXT: ; %bb.4: ; %endif
+; GFX1164DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: divergent_cfg_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164GISEL-NEXT: s_mov_b64 s[8:9], exec
+; GFX1164GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
+; GFX1164GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1164GISEL-NEXT: ; %bb.1: ; %else
+; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164GISEL-NEXT: s_sub_i32 s6, 0, s6
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT: s_ashr_i32 s7, s6, 31
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mul_hi_u32 s10, s2, s6
+; GFX1164GISEL-NEXT: s_mul_i32 s3, s3, s6
+; GFX1164GISEL-NEXT: s_mul_i32 s7, s2, s7
+; GFX1164GISEL-NEXT: s_add_u32 s3, s10, s3
+; GFX1164GISEL-NEXT: s_mul_i32 s6, s2, s6
+; GFX1164GISEL-NEXT: s_add_u32 s7, s3, s7
+; GFX1164GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[2:3], s[8:9]
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1164GISEL-NEXT: ; %bb.3: ; %if
+; GFX1164GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164GISEL-NEXT: s_sub_i32 s6, 0, s6
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT: s_ashr_i32 s7, s6, 31
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mul_hi_u32 s8, s4, s6
+; GFX1164GISEL-NEXT: s_mul_i32 s5, s5, s6
+; GFX1164GISEL-NEXT: s_mul_i32 s7, s4, s7
+; GFX1164GISEL-NEXT: s_add_u32 s5, s8, s5
+; GFX1164GISEL-NEXT: s_mul_i32 s6, s4, s6
+; GFX1164GISEL-NEXT: s_add_u32 s7, s5, s7
+; GFX1164GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1164GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_clause 0x1
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132DAGISEL-NEXT: s_mov_b32 s8, exec_lo
+; GFX1132DAGISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX1132DAGISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1132DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX1132DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s6, s6
+; GFX1132DAGISEL-NEXT: s_sub_i32 s6, 0, s6
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_ashr_i32 s7, s6, 31
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_mul_hi_u32 s9, s2, s6
+; GFX1132DAGISEL-NEXT: s_mul_i32 s3, s3, s6
+; GFX1132DAGISEL-NEXT: s_mul_i32 s7, s2, s7
+; GFX1132DAGISEL-NEXT: s_add_u32 s3, s9, s3
+; GFX1132DAGISEL-NEXT: s_mul_i32 s6, s2, s6
+; GFX1132DAGISEL-NEXT: s_add_u32 s7, s3, s7
+; GFX1132DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s2, s8
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s2
+; GFX1132DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_sub_i32 s3, 0, s3
+; GFX1132DAGISEL-NEXT: s_ashr_i32 s6, s3, 31
+; GFX1132DAGISEL-NEXT: s_mul_hi_u32 s7, s4, s3
+; GFX1132DAGISEL-NEXT: s_mul_i32 s5, s5, s3
+; GFX1132DAGISEL-NEXT: s_mul_i32 s6, s4, s6
+; GFX1132DAGISEL-NEXT: s_add_u32 s5, s7, s5
+; GFX1132DAGISEL-NEXT: s_mul_i32 s4, s4, s3
+; GFX1132DAGISEL-NEXT: s_add_u32 s5, s5, s6
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX1132DAGISEL-NEXT: ; %bb.4: ; %endif
+; GFX1132DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: divergent_cfg_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132GISEL-NEXT: s_mov_b32 s8, exec_lo
+; GFX1132GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
+; GFX1132GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1132GISEL-NEXT: ; %bb.1: ; %else
+; GFX1132GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s6, s6
+; GFX1132GISEL-NEXT: s_sub_i32 s6, 0, s6
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: s_ashr_i32 s7, s6, 31
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mul_hi_u32 s9, s2, s6
+; GFX1132GISEL-NEXT: s_mul_i32 s3, s3, s6
+; GFX1132GISEL-NEXT: s_mul_i32 s7, s2, s7
+; GFX1132GISEL-NEXT: s_add_u32 s3, s9, s3
+; GFX1132GISEL-NEXT: s_mul_i32 s6, s2, s6
+; GFX1132GISEL-NEXT: s_add_u32 s7, s3, s7
+; GFX1132GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s2, s8
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1132GISEL-NEXT: ; %bb.3: ; %if
+; GFX1132GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132GISEL-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1132GISEL-NEXT: s_sub_i32 s3, 0, s3
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: s_ashr_i32 s6, s3, 31
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mul_hi_u32 s7, s4, s3
+; GFX1132GISEL-NEXT: s_mul_i32 s5, s5, s3
+; GFX1132GISEL-NEXT: s_mul_i32 s8, s4, s6
+; GFX1132GISEL-NEXT: s_add_u32 s5, s7, s5
+; GFX1132GISEL-NEXT: s_mul_i32 s6, s4, s3
+; GFX1132GISEL-NEXT: s_add_u32 s7, s5, s8
+; GFX1132GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1132GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %d_cmp = icmp ult i32 %tid, 16
+ br i1 %d_cmp, label %if, label %else
+
+if:
+ %reducedValTid = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 %in2, i32 1)
+ br label %endif
+
+else:
+ %reducedValIn = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 %in, i32 1)
+ br label %endif
+
+endif:
+ %combine = phi i64 [%reducedValTid, %if], [%reducedValIn, %else]
+ store i64 %combine, ptr addrspace(1) %out
+ ret void
+}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX10DAGISEL: {{.*}}
; GFX10GISEL: {{.*}}
>From d2bb127e9f79da9ed353b6bf17c378b77e3a94a3 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Fri, 1 Aug 2025 16:45:12 +0530
Subject: [PATCH 5/9] Renaming Variables
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 56 +++++++++++------------
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 7668c4c2169d5..f038e75837865 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5372,39 +5372,39 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
const TargetRegisterClass *WaveMaskRegClass = TRI->getWaveMaskRegClass();
const TargetRegisterClass *DstRegClass = MRI.getRegClass(DstReg);
Register ExecMask = MRI.createVirtualRegister(WaveMaskRegClass);
- Register ActiveLanes =
+ Register NumActiveLanes =
MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
bool IsWave32 = ST.isWave32();
unsigned MovOpc = IsWave32 ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
MCRegister ExecReg = IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
- unsigned CountReg =
+ unsigned BitCountOpc =
IsWave32 ? AMDGPU::S_BCNT1_I32_B32 : AMDGPU::S_BCNT1_I32_B64;
- BuildMI(BB, MI, DL, TII->get(MovOpc), ExecMask).addReg(ExecReg);
-
- auto NewAccumulator =
- BuildMI(BB, MI, DL, TII->get(CountReg), ActiveLanes)
- .addReg(ExecMask);
-
- switch (Opc) {
- case AMDGPU::S_XOR_B32: {
- // Performing an XOR operation on a uniform value
- // depends on the parity of the number of active lanes.
- // For even parity, the result will be 0, for odd
- // parity the result will be the same as the input value.
- Register ParityRegister =
- MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
-
- BuildMI(BB, MI, DL, TII->get(AMDGPU::S_AND_B32), ParityRegister)
- .addReg(NewAccumulator->getOperand(0).getReg())
- .addImm(1)
- .setOperandDead(3); // Dead scc
- BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DstReg)
- .addReg(SrcReg)
- .addReg(ParityRegister);
- break;
- }
+ BuildMI(BB, MI, DL, TII->get(MovOpc), ExecMask).addReg(ExecReg);
+
+ auto NewAccumulator =
+ BuildMI(BB, MI, DL, TII->get(BitCountOpc), NumActiveLanes)
+ .addReg(ExecMask);
+
+ switch (Opc) {
+ case AMDGPU::S_XOR_B32: {
+ // Performing an XOR operation on a uniform value
+ // depends on the parity of the number of active lanes.
+ // For even parity, the result will be 0, for odd
+ // parity the result will be the same as the input value.
+ Register ParityRegister =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_AND_B32), ParityRegister)
+ .addReg(NewAccumulator->getOperand(0).getReg())
+ .addImm(1)
+ .setOperandDead(3); // Dead scc
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DstReg)
+ .addReg(SrcReg)
+ .addReg(ParityRegister);
+ break;
+ }
case AMDGPU::S_SUB_I32: {
Register NegatedVal = MRI.createVirtualRegister(DstRegClass);
@@ -5637,8 +5637,8 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
.addReg(Accumulator->getOperand(0).getReg());
break;
}
- case ::AMDGPU::S_ADD_U64_PSEUDO:
- case ::AMDGPU::S_SUB_U64_PSEUDO: {
+ case AMDGPU::S_ADD_U64_PSEUDO:
+ case AMDGPU::S_SUB_U64_PSEUDO: {
unsigned newOpc1 = Opc == AMDGPU::S_ADD_U64_PSEUDO ? AMDGPU::S_ADD_U32
: AMDGPU::S_SUB_U32;
unsigned newOpc2 = Opc == AMDGPU::S_ADD_U64_PSEUDO ? AMDGPU::S_ADDC_U32
>From 0a63b2503881923a20ec672d46a5ac66db41d188 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Wed, 13 Aug 2025 18:22:54 +0530
Subject: [PATCH 6/9] Marking dead scc
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 19 ++++----
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll | 44 +++++++------------
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll | 44 +++++++------------
3 files changed, 43 insertions(+), 64 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index f038e75837865..36533cf240f96 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5281,9 +5281,7 @@ static uint32_t getIdentityValueFor32BitWaveReduction(unsigned Opc) {
case AMDGPU::S_MAX_I32:
return std::numeric_limits<int32_t>::min();
case AMDGPU::S_ADD_I32:
- case AMDGPU::S_ADD_U64_PSEUDO:
case AMDGPU::S_SUB_I32:
- case AMDGPU::S_SUB_U64_PSEUDO:
case AMDGPU::S_OR_B32:
case AMDGPU::S_XOR_B32:
return std::numeric_limits<uint32_t>::min();
@@ -5305,6 +5303,9 @@ static uint64_t getIdentityValueFor64BitWaveReduction(unsigned Opc) {
return std::numeric_limits<uint64_t>::min();
case AMDGPU::V_CMP_GT_I64_e64: // max.i64
return std::numeric_limits<int64_t>::min();
+ case AMDGPU::S_ADD_U64_PSEUDO:
+ case AMDGPU::S_SUB_U64_PSEUDO:
+ return std::numeric_limits<uint64_t>::min();
default:
llvm_unreachable(
"Unexpected opcode in getIdentityValueFor64BitWaveReduction");
@@ -5450,7 +5451,8 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
if (Opc == AMDGPU::S_SUB_U64_PSEUDO) {
BuildMI(BB, MI, DL, TII->get(AMDGPU::S_SUB_I32), NegatedValLo)
.addImm(0)
- .addReg(NewAccumulator->getOperand(0).getReg());
+ .addReg(NewAccumulator->getOperand(0).getReg())
+ .setOperandDead(3); // Dead scc
BuildMI(BB, MI, DL, TII->get(AMDGPU::S_ASHR_I32), NegatedValHi)
.addReg(NegatedValLo)
.addImm(31)
@@ -5639,9 +5641,9 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
}
case AMDGPU::S_ADD_U64_PSEUDO:
case AMDGPU::S_SUB_U64_PSEUDO: {
- unsigned newOpc1 = Opc == AMDGPU::S_ADD_U64_PSEUDO ? AMDGPU::S_ADD_U32
+ unsigned NewOpc1 = Opc == AMDGPU::S_ADD_U64_PSEUDO ? AMDGPU::S_ADD_U32
: AMDGPU::S_SUB_U32;
- unsigned newOpc2 = Opc == AMDGPU::S_ADD_U64_PSEUDO ? AMDGPU::S_ADDC_U32
+ unsigned NewOpc2 = Opc == AMDGPU::S_ADD_U64_PSEUDO ? AMDGPU::S_ADDC_U32
: AMDGPU::S_SUBB_U32;
Register DestLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
Register DestHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
@@ -5651,12 +5653,13 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
MachineOperand Accumhi = TII->buildExtractSubRegOrImm(
MI, MRI, Accumulator->getOperand(0), DstRegClass, AMDGPU::sub1,
&AMDGPU::SReg_32RegClass);
- BuildMI(*ComputeLoop, I, DL, TII->get(newOpc1), DestLo)
+ BuildMI(*ComputeLoop, I, DL, TII->get(NewOpc1), DestLo)
.add(Accumlo)
.addReg(LaneValueLo->getOperand(0).getReg());
- BuildMI(*ComputeLoop, I, DL, TII->get(newOpc2), DestHi)
+ BuildMI(*ComputeLoop, I, DL, TII->get(NewOpc2), DestHi)
.add(Accumhi)
- .addReg(LaneValueHi->getOperand(0).getReg());
+ .addReg(LaneValueHi->getOperand(0).getReg())
+ .setOperandDead(3); // Dead scc
NewAccumulator = BuildMI(*ComputeLoop, I, DL,
TII->get(TargetOpcode::REG_SEQUENCE), DstReg)
.addReg(DestLo)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
index b6af8b4bb798d..0f38c69fe1786 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
@@ -1833,9 +1833,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8DAGISEL-LABEL: divergent_value_i64:
; GFX8DAGISEL: ; %bb.0: ; %entry
; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX8DAGISEL-NEXT: s_mov_b64 s[4:5], 0
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX8DAGISEL-NEXT: s_mov_b32 s5, s4
; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s8, s[6:7]
; GFX8DAGISEL-NEXT: v_readlane_b32 s9, v2, s8
@@ -1855,9 +1854,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8GISEL-LABEL: divergent_value_i64:
; GFX8GISEL: ; %bb.0: ; %entry
; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8GISEL-NEXT: s_mov_b32 s4, 0
+; GFX8GISEL-NEXT: s_mov_b64 s[4:5], 0
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX8GISEL-NEXT: s_mov_b32 s5, s4
; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8GISEL-NEXT: s_ff1_i32_b64 s8, s[6:7]
; GFX8GISEL-NEXT: v_readlane_b32 s9, v2, s8
@@ -1877,9 +1875,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9DAGISEL-LABEL: divergent_value_i64:
; GFX9DAGISEL: ; %bb.0: ; %entry
; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], 0
; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX9DAGISEL-NEXT: s_mov_b32 s5, s4
; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s8, s[6:7]
; GFX9DAGISEL-NEXT: v_readlane_b32 s9, v2, s8
@@ -1899,9 +1896,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9GISEL-LABEL: divergent_value_i64:
; GFX9GISEL: ; %bb.0: ; %entry
; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9GISEL-NEXT: s_mov_b32 s4, 0
+; GFX9GISEL-NEXT: s_mov_b64 s[4:5], 0
; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX9GISEL-NEXT: s_mov_b32 s5, s4
; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9GISEL-NEXT: s_ff1_i32_b64 s8, s[6:7]
; GFX9GISEL-NEXT: v_readlane_b32 s9, v2, s8
@@ -1921,9 +1917,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064DAGISEL-LABEL: divergent_value_i64:
; GFX1064DAGISEL: ; %bb.0: ; %entry
; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1064DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], 0
; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX1064DAGISEL-NEXT: s_mov_b32 s5, s4
; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s8, s[6:7]
; GFX1064DAGISEL-NEXT: v_readlane_b32 s9, v2, s8
@@ -1942,9 +1937,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064GISEL-LABEL: divergent_value_i64:
; GFX1064GISEL: ; %bb.0: ; %entry
; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1064GISEL-NEXT: s_mov_b32 s4, 0
+; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], 0
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX1064GISEL-NEXT: s_mov_b32 s5, s4
; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064GISEL-NEXT: s_ff1_i32_b64 s8, s[6:7]
; GFX1064GISEL-NEXT: v_readlane_b32 s9, v2, s8
@@ -1963,9 +1957,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032DAGISEL-LABEL: divergent_value_i64:
; GFX1032DAGISEL: ; %bb.0: ; %entry
; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1032DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX1032DAGISEL-NEXT: s_mov_b64 s[4:5], 0
; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
-; GFX1032DAGISEL-NEXT: s_mov_b32 s5, s4
; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032DAGISEL-NEXT: v_readlane_b32 s8, v2, s7
@@ -1984,9 +1977,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032GISEL-LABEL: divergent_value_i64:
; GFX1032GISEL: ; %bb.0: ; %entry
; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1032GISEL-NEXT: s_mov_b32 s4, 0
+; GFX1032GISEL-NEXT: s_mov_b64 s[4:5], 0
; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
-; GFX1032GISEL-NEXT: s_mov_b32 s5, s4
; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032GISEL-NEXT: v_readlane_b32 s8, v2, s7
@@ -2005,12 +1997,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164DAGISEL-LABEL: divergent_value_i64:
; GFX1164DAGISEL: ; %bb.0: ; %entry
; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1164DAGISEL-NEXT: s_mov_b32 s0, 0
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[0:1], 0
; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT: s_mov_b32 s1, s0
; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s4, s[2:3]
-; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v2, s4
; GFX1164DAGISEL-NEXT: v_readlane_b32 s6, v3, s4
; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s4
@@ -2027,12 +2018,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164GISEL-LABEL: divergent_value_i64:
; GFX1164GISEL: ; %bb.0: ; %entry
; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1164GISEL-NEXT: s_mov_b32 s0, 0
+; GFX1164GISEL-NEXT: s_mov_b64 s[0:1], 0
; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT: s_mov_b32 s1, s0
; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164GISEL-NEXT: s_ctz_i32_b64 s4, s[2:3]
-; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1164GISEL-NEXT: v_readlane_b32 s5, v2, s4
; GFX1164GISEL-NEXT: v_readlane_b32 s6, v3, s4
; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s4
@@ -2049,12 +2039,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132DAGISEL-LABEL: divergent_value_i64:
; GFX1132DAGISEL: ; %bb.0: ; %entry
; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1132DAGISEL-NEXT: s_mov_b32 s0, 0
+; GFX1132DAGISEL-NEXT: s_mov_b64 s[0:1], 0
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT: s_mov_b32 s1, s0
; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
-; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
@@ -2070,12 +2059,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132GISEL-LABEL: divergent_value_i64:
; GFX1132GISEL: ; %bb.0: ; %entry
; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1132GISEL-NEXT: s_mov_b32 s0, 0
+; GFX1132GISEL-NEXT: s_mov_b64 s[0:1], 0
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT: s_mov_b32 s1, s0
; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
-; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
index 9a0917133fc59..c60230e2d3092 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
@@ -2036,9 +2036,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8DAGISEL-LABEL: divergent_value_i64:
; GFX8DAGISEL: ; %bb.0: ; %entry
; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX8DAGISEL-NEXT: s_mov_b64 s[4:5], 0
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX8DAGISEL-NEXT: s_mov_b32 s5, s4
; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s8, s[6:7]
; GFX8DAGISEL-NEXT: v_readlane_b32 s9, v2, s8
@@ -2058,9 +2057,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8GISEL-LABEL: divergent_value_i64:
; GFX8GISEL: ; %bb.0: ; %entry
; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8GISEL-NEXT: s_mov_b32 s4, 0
+; GFX8GISEL-NEXT: s_mov_b64 s[4:5], 0
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX8GISEL-NEXT: s_mov_b32 s5, s4
; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8GISEL-NEXT: s_ff1_i32_b64 s8, s[6:7]
; GFX8GISEL-NEXT: v_readlane_b32 s9, v2, s8
@@ -2080,9 +2078,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9DAGISEL-LABEL: divergent_value_i64:
; GFX9DAGISEL: ; %bb.0: ; %entry
; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], 0
; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX9DAGISEL-NEXT: s_mov_b32 s5, s4
; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s8, s[6:7]
; GFX9DAGISEL-NEXT: v_readlane_b32 s9, v2, s8
@@ -2102,9 +2099,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9GISEL-LABEL: divergent_value_i64:
; GFX9GISEL: ; %bb.0: ; %entry
; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9GISEL-NEXT: s_mov_b32 s4, 0
+; GFX9GISEL-NEXT: s_mov_b64 s[4:5], 0
; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX9GISEL-NEXT: s_mov_b32 s5, s4
; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9GISEL-NEXT: s_ff1_i32_b64 s8, s[6:7]
; GFX9GISEL-NEXT: v_readlane_b32 s9, v2, s8
@@ -2124,9 +2120,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064DAGISEL-LABEL: divergent_value_i64:
; GFX1064DAGISEL: ; %bb.0: ; %entry
; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1064DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], 0
; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX1064DAGISEL-NEXT: s_mov_b32 s5, s4
; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s8, s[6:7]
; GFX1064DAGISEL-NEXT: v_readlane_b32 s9, v2, s8
@@ -2145,9 +2140,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064GISEL-LABEL: divergent_value_i64:
; GFX1064GISEL: ; %bb.0: ; %entry
; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1064GISEL-NEXT: s_mov_b32 s4, 0
+; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], 0
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX1064GISEL-NEXT: s_mov_b32 s5, s4
; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064GISEL-NEXT: s_ff1_i32_b64 s8, s[6:7]
; GFX1064GISEL-NEXT: v_readlane_b32 s9, v2, s8
@@ -2166,9 +2160,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032DAGISEL-LABEL: divergent_value_i64:
; GFX1032DAGISEL: ; %bb.0: ; %entry
; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1032DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX1032DAGISEL-NEXT: s_mov_b64 s[4:5], 0
; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
-; GFX1032DAGISEL-NEXT: s_mov_b32 s5, s4
; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032DAGISEL-NEXT: v_readlane_b32 s8, v2, s7
@@ -2187,9 +2180,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032GISEL-LABEL: divergent_value_i64:
; GFX1032GISEL: ; %bb.0: ; %entry
; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1032GISEL-NEXT: s_mov_b32 s4, 0
+; GFX1032GISEL-NEXT: s_mov_b64 s[4:5], 0
; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
-; GFX1032GISEL-NEXT: s_mov_b32 s5, s4
; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032GISEL-NEXT: v_readlane_b32 s8, v2, s7
@@ -2208,12 +2200,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164DAGISEL-LABEL: divergent_value_i64:
; GFX1164DAGISEL: ; %bb.0: ; %entry
; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1164DAGISEL-NEXT: s_mov_b32 s0, 0
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[0:1], 0
; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT: s_mov_b32 s1, s0
; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s4, s[2:3]
-; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v2, s4
; GFX1164DAGISEL-NEXT: v_readlane_b32 s6, v3, s4
; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s4
@@ -2230,12 +2221,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164GISEL-LABEL: divergent_value_i64:
; GFX1164GISEL: ; %bb.0: ; %entry
; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1164GISEL-NEXT: s_mov_b32 s0, 0
+; GFX1164GISEL-NEXT: s_mov_b64 s[0:1], 0
; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT: s_mov_b32 s1, s0
; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164GISEL-NEXT: s_ctz_i32_b64 s4, s[2:3]
-; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1164GISEL-NEXT: v_readlane_b32 s5, v2, s4
; GFX1164GISEL-NEXT: v_readlane_b32 s6, v3, s4
; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s4
@@ -2252,12 +2242,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132DAGISEL-LABEL: divergent_value_i64:
; GFX1132DAGISEL: ; %bb.0: ; %entry
; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1132DAGISEL-NEXT: s_mov_b32 s0, 0
+; GFX1132DAGISEL-NEXT: s_mov_b64 s[0:1], 0
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT: s_mov_b32 s1, s0
; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
-; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
@@ -2273,12 +2262,11 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132GISEL-LABEL: divergent_value_i64:
; GFX1132GISEL: ; %bb.0: ; %entry
; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1132GISEL-NEXT: s_mov_b32 s0, 0
+; GFX1132GISEL-NEXT: s_mov_b64 s[0:1], 0
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT: s_mov_b32 s1, s0
; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
-; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
>From 26c19f76cbd0a133b3c5502f2c5f478977bec9d7 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Wed, 20 Aug 2025 14:55:18 +0530
Subject: [PATCH 7/9] Checking for targets with native 64-bit `add`/`sub`
support
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 62 +++--
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll | 219 +++++++++++++++
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll | 250 ++++++++++++++++++
3 files changed, 506 insertions(+), 25 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 36533cf240f96..55117e09873e3 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5641,31 +5641,43 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
}
case AMDGPU::S_ADD_U64_PSEUDO:
case AMDGPU::S_SUB_U64_PSEUDO: {
- unsigned NewOpc1 = Opc == AMDGPU::S_ADD_U64_PSEUDO ? AMDGPU::S_ADD_U32
- : AMDGPU::S_SUB_U32;
- unsigned NewOpc2 = Opc == AMDGPU::S_ADD_U64_PSEUDO ? AMDGPU::S_ADDC_U32
- : AMDGPU::S_SUBB_U32;
- Register DestLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
- Register DestHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
- MachineOperand Accumlo = TII->buildExtractSubRegOrImm(
- MI, MRI, Accumulator->getOperand(0), DstRegClass, AMDGPU::sub0,
- &AMDGPU::SReg_32RegClass);
- MachineOperand Accumhi = TII->buildExtractSubRegOrImm(
- MI, MRI, Accumulator->getOperand(0), DstRegClass, AMDGPU::sub1,
- &AMDGPU::SReg_32RegClass);
- BuildMI(*ComputeLoop, I, DL, TII->get(NewOpc1), DestLo)
- .add(Accumlo)
- .addReg(LaneValueLo->getOperand(0).getReg());
- BuildMI(*ComputeLoop, I, DL, TII->get(NewOpc2), DestHi)
- .add(Accumhi)
- .addReg(LaneValueHi->getOperand(0).getReg())
- .setOperandDead(3); // Dead scc
- NewAccumulator = BuildMI(*ComputeLoop, I, DL,
- TII->get(TargetOpcode::REG_SEQUENCE), DstReg)
- .addReg(DestLo)
- .addImm(AMDGPU::sub0)
- .addReg(DestHi)
- .addImm(AMDGPU::sub1);
+ if (ST.hasScalarAddSub64()) {
+ NewAccumulator = BuildMI(*ComputeLoop, I, DL,
+ TII->get(Opc == AMDGPU::S_ADD_U64_PSEUDO
+ ? AMDGPU::S_ADD_U64
+ : AMDGPU::S_SUB_U64),
+ DstReg)
+ .addReg(Accumulator->getOperand(0).getReg())
+ .addReg(LaneValue->getOperand(0).getReg());
+ } else {
+ unsigned NewOpc1 = Opc == AMDGPU::S_ADD_U64_PSEUDO
+ ? AMDGPU::S_ADD_U32
+ : AMDGPU::S_SUB_U32;
+ unsigned NewOpc2 = Opc == AMDGPU::S_ADD_U64_PSEUDO
+ ? AMDGPU::S_ADDC_U32
+ : AMDGPU::S_SUBB_U32;
+ Register DestLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ Register DestHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ MachineOperand Accumlo = TII->buildExtractSubRegOrImm(
+ MI, MRI, Accumulator->getOperand(0), DstRegClass, AMDGPU::sub0,
+ &AMDGPU::SReg_32RegClass);
+ MachineOperand Accumhi = TII->buildExtractSubRegOrImm(
+ MI, MRI, Accumulator->getOperand(0), DstRegClass, AMDGPU::sub1,
+ &AMDGPU::SReg_32RegClass);
+ BuildMI(*ComputeLoop, I, DL, TII->get(NewOpc1), DestLo)
+ .add(Accumlo)
+ .addReg(LaneValueLo->getOperand(0).getReg());
+ BuildMI(*ComputeLoop, I, DL, TII->get(NewOpc2), DestHi)
+ .add(Accumhi)
+ .addReg(LaneValueHi->getOperand(0).getReg())
+ .setOperandDead(3); // Dead scc
+ NewAccumulator = BuildMI(*ComputeLoop, I, DL,
+ TII->get(TargetOpcode::REG_SEQUENCE), DstReg)
+ .addReg(DestLo)
+ .addImm(AMDGPU::sub0)
+ .addReg(DestHi)
+ .addImm(AMDGPU::sub1);
+ }
break;
}
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
index 0f38c69fe1786..5f303e30c923b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
@@ -11,6 +11,7 @@
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX11GISEL,GFX1164GISEL %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=0 < %s | FileCheck -check-prefixes=GFX11DAGISEL,GFX1132DAGISEL %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 < %s | FileCheck -check-prefixes=GFX11GISEL,GFX1132GISEL %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -global-isel=0 < %s | FileCheck -check-prefixes=GFX12DAGISEL %s
define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) {
; GFX8DAGISEL-LABEL: uniform_value:
@@ -181,6 +182,18 @@ define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) {
; GFX1132GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: uniform_value:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: s_mov_b32 s3, exec_lo
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_mul_i32 s2, s2, s3
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX12DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%result = call i32 @llvm.amdgcn.wave.reduce.add.i32(i32 %in, i32 1)
store i32 %result, ptr addrspace(1) %out
@@ -337,6 +350,19 @@ define amdgpu_kernel void @const_value(ptr addrspace(1) %out) {
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: const_value:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX12DAGISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%result = call i32 @llvm.amdgcn.wave.reduce.add.i32(i32 123, i32 1)
store i32 %result, ptr addrspace(1) %out
@@ -492,6 +518,18 @@ define amdgpu_kernel void @poison_value(ptr addrspace(1) %out, i32 %in) {
; GFX1132GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: poison_value:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX12DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%result = call i32 @llvm.amdgcn.wave.reduce.add.i32(i32 poison, i32 1)
store i32 %result, ptr addrspace(1) %out
@@ -734,6 +772,26 @@ define amdgpu_kernel void @divergent_value(ptr addrspace(1) %out) {
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: divergent_value:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12DAGISEL-NEXT: s_mov_b32 s3, exec_lo
+; GFX12DAGISEL-NEXT: s_mov_b32 s2, 0
+; GFX12DAGISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1
+; GFX12DAGISEL-NEXT: s_ctz_i32_b32 s4, s3
+; GFX12DAGISEL-NEXT: s_wait_alu 0xfffe
+; GFX12DAGISEL-NEXT: v_readlane_b32 s5, v0, s4
+; GFX12DAGISEL-NEXT: s_bitset0_b32 s3, s4
+; GFX12DAGISEL-NEXT: s_add_co_i32 s2, s2, s5
+; GFX12DAGISEL-NEXT: s_cmp_lg_u32 s3, 0
+; GFX12DAGISEL-NEXT: s_cbranch_scc1 .LBB3_1
+; GFX12DAGISEL-NEXT: ; %bb.2:
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%id.x = call i32 @llvm.amdgcn.workitem.id.x()
%result = call i32 @llvm.amdgcn.wave.reduce.add.i32(i32 %id.x, i32 1)
@@ -1208,6 +1266,50 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[2:3]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: divergent_cfg:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12DAGISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX12DAGISEL-NEXT: ; implicit-def: $sgpr1
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX12DAGISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB4_2
+; GFX12DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX12DAGISEL-NEXT: s_load_b32 s1, s[4:5], 0x2c
+; GFX12DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX12DAGISEL-NEXT: ; implicit-def: $vgpr0
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_mul_i32 s1, s1, s2
+; GFX12DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX12DAGISEL-NEXT: s_or_saveexec_b32 s0, s0
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX12DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB4_6
+; GFX12DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX12DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX12DAGISEL-NEXT: s_mov_b32 s1, 0
+; GFX12DAGISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1
+; GFX12DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX12DAGISEL-NEXT: s_wait_alu 0xfffe
+; GFX12DAGISEL-NEXT: v_readlane_b32 s6, v0, s3
+; GFX12DAGISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX12DAGISEL-NEXT: s_add_co_i32 s1, s1, s6
+; GFX12DAGISEL-NEXT: s_wait_alu 0xfffe
+; GFX12DAGISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX12DAGISEL-NEXT: s_cbranch_scc1 .LBB4_4
+; GFX12DAGISEL-NEXT: ; %bb.5:
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX12DAGISEL-NEXT: .LBB4_6: ; %endif
+; GFX12DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%d_cmp = icmp ult i32 %tid, 16
@@ -1421,6 +1523,22 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: uniform_value_i64:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: s_mov_b32 s4, exec_lo
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_bcnt1_i32_b32 s4, s4
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX12DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX12DAGISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX12DAGISEL-NEXT: s_add_co_u32 s3, s5, s3
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX12DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 %in, i32 1)
store i64 %result, ptr addrspace(1) %out
@@ -1623,6 +1741,22 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: const_value_i64:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX12DAGISEL-NEXT: s_mul_hi_u32 s3, 0x7b, s2
+; GFX12DAGISEL-NEXT: s_mul_i32 s4, s2, 0
+; GFX12DAGISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX12DAGISEL-NEXT: s_add_co_u32 s3, s3, s4
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 123, i32 1)
store i64 %result, ptr addrspace(1) %out
@@ -1823,6 +1957,22 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: poison_value_i64:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_mul_hi_u32 s3, s0, s2
+; GFX12DAGISEL-NEXT: s_mul_i32 s4, s1, s2
+; GFX12DAGISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX12DAGISEL-NEXT: s_add_co_u32 s3, s3, s4
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX12DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 poison, i32 1)
store i64 %result, ptr addrspace(1) %out
@@ -2075,6 +2225,32 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132GISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX1132GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX1132GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12DAGISEL-LABEL: divergent_value_i64:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_expcnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_mov_b64 s[0:1], 0
+; GFX12DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX12DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX12DAGISEL-NEXT: s_wait_alu 0xfffe
+; GFX12DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX12DAGISEL-NEXT: s_wait_alu 0xfffe
+; GFX12DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX12DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX12DAGISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX12DAGISEL-NEXT: s_wait_alu 0xfffe
+; GFX12DAGISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX12DAGISEL-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
+; GFX12DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX12DAGISEL-NEXT: ; %bb.2:
+; GFX12DAGISEL-NEXT: s_wait_alu 0xfffe
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX12DAGISEL-NEXT: s_setpc_b64 s[30:31]
entry:
%result = call i64 @llvm.amdgcn.wave.reduce.add.i64(i64 %id.x, i32 1)
store i64 %result, ptr addrspace(1) %out
@@ -2552,6 +2728,49 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: divergent_cfg_i64:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_clause 0x1
+; GFX12DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX12DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12DAGISEL-NEXT: s_mov_b32 s8, exec_lo
+; GFX12DAGISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX12DAGISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX12DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX12DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_bcnt1_i32_b32 s6, s6
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_mul_hi_u32 s7, s2, s6
+; GFX12DAGISEL-NEXT: s_mul_i32 s3, s3, s6
+; GFX12DAGISEL-NEXT: s_mul_i32 s6, s2, s6
+; GFX12DAGISEL-NEXT: s_add_co_u32 s7, s7, s3
+; GFX12DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_or_saveexec_b32 s2, s8
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX12DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s2
+; GFX12DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_mov_b32 s3, exec_lo
+; GFX12DAGISEL-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_mul_hi_u32 s6, s4, s3
+; GFX12DAGISEL-NEXT: s_mul_i32 s5, s5, s3
+; GFX12DAGISEL-NEXT: s_mul_i32 s4, s4, s3
+; GFX12DAGISEL-NEXT: s_wait_alu 0xfffe
+; GFX12DAGISEL-NEXT: s_add_co_u32 s5, s6, s5
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX12DAGISEL-NEXT: ; %bb.4: ; %endif
+; GFX12DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX12DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%d_cmp = icmp ult i32 %tid, 16
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
index c60230e2d3092..bc8bf7f4b3b6f 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.sub.ll
@@ -11,6 +11,7 @@
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX11GISEL,GFX1164GISEL %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=0 < %s | FileCheck -check-prefixes=GFX11DAGISEL,GFX1132DAGISEL %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 < %s | FileCheck -check-prefixes=GFX11GISEL,GFX1132GISEL %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -global-isel=0 < %s | FileCheck -check-prefixes=GFX12DAGISEL %s
define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) {
; GFX8DAGISEL-LABEL: uniform_value:
@@ -194,6 +195,20 @@ define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) {
; GFX1132GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: uniform_value:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: s_mov_b32 s3, exec_lo
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_sub_co_i32 s2, 0, s2
+; GFX12DAGISEL-NEXT: s_mul_i32 s2, s2, s3
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX12DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%result = call i32 @llvm.amdgcn.wave.reduce.sub.i32(i32 %in, i32 1)
store i32 %result, ptr addrspace(1) %out
@@ -361,6 +376,19 @@ define amdgpu_kernel void @const_value(ptr addrspace(1) %out) {
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: const_value:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX12DAGISEL-NEXT: s_sub_co_i32 s3, 0, 0x7b
+; GFX12DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_mul_i32 s2, s3, s2
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%result = call i32 @llvm.amdgcn.wave.reduce.sub.i32(i32 123, i32 1)
store i32 %result, ptr addrspace(1) %out
@@ -529,6 +557,20 @@ define amdgpu_kernel void @poison_value(ptr addrspace(1) %out, i32 %in) {
; GFX1132GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: poison_value:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_sub_co_i32 s3, 0, s0
+; GFX12DAGISEL-NEXT: s_mul_i32 s2, s3, s2
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX12DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%result = call i32 @llvm.amdgcn.wave.reduce.sub.i32(i32 poison, i32 1)
store i32 %result, ptr addrspace(1) %out
@@ -771,6 +813,26 @@ define amdgpu_kernel void @divergent_value(ptr addrspace(1) %out) {
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: divergent_value:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12DAGISEL-NEXT: s_mov_b32 s3, exec_lo
+; GFX12DAGISEL-NEXT: s_mov_b32 s2, 0
+; GFX12DAGISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1
+; GFX12DAGISEL-NEXT: s_ctz_i32_b32 s4, s3
+; GFX12DAGISEL-NEXT: s_wait_alu 0xfffe
+; GFX12DAGISEL-NEXT: v_readlane_b32 s5, v0, s4
+; GFX12DAGISEL-NEXT: s_bitset0_b32 s3, s4
+; GFX12DAGISEL-NEXT: s_sub_co_i32 s2, s2, s5
+; GFX12DAGISEL-NEXT: s_cmp_lg_u32 s3, 0
+; GFX12DAGISEL-NEXT: s_cbranch_scc1 .LBB3_1
+; GFX12DAGISEL-NEXT: ; %bb.2:
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%id.x = call i32 @llvm.amdgcn.workitem.id.x()
%result = call i32 @llvm.amdgcn.wave.reduce.sub.i32(i32 %id.x, i32 1)
@@ -1257,6 +1319,51 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[2:3]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: divergent_cfg:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12DAGISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX12DAGISEL-NEXT: ; implicit-def: $sgpr1
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX12DAGISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB4_2
+; GFX12DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX12DAGISEL-NEXT: s_load_b32 s1, s[4:5], 0x2c
+; GFX12DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX12DAGISEL-NEXT: ; implicit-def: $vgpr0
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_sub_co_i32 s1, 0, s1
+; GFX12DAGISEL-NEXT: s_mul_i32 s1, s1, s2
+; GFX12DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX12DAGISEL-NEXT: s_or_saveexec_b32 s0, s0
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX12DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB4_6
+; GFX12DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX12DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX12DAGISEL-NEXT: s_mov_b32 s1, 0
+; GFX12DAGISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1
+; GFX12DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX12DAGISEL-NEXT: s_wait_alu 0xfffe
+; GFX12DAGISEL-NEXT: v_readlane_b32 s6, v0, s3
+; GFX12DAGISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX12DAGISEL-NEXT: s_sub_co_i32 s1, s1, s6
+; GFX12DAGISEL-NEXT: s_wait_alu 0xfffe
+; GFX12DAGISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX12DAGISEL-NEXT: s_cbranch_scc1 .LBB4_4
+; GFX12DAGISEL-NEXT: ; %bb.5:
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX12DAGISEL-NEXT: .LBB4_6: ; %endif
+; GFX12DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%d_cmp = icmp ult i32 %tid, 16
@@ -1522,6 +1629,27 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: uniform_value_i64:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: s_mov_b32 s4, exec_lo
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_bcnt1_i32_b32 s4, s4
+; GFX12DAGISEL-NEXT: s_sub_co_i32 s4, 0, s4
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_ashr_i32 s5, s4, 31
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_mul_hi_u32 s6, s2, s4
+; GFX12DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX12DAGISEL-NEXT: s_mul_i32 s5, s2, s5
+; GFX12DAGISEL-NEXT: s_add_co_u32 s3, s6, s3
+; GFX12DAGISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX12DAGISEL-NEXT: s_add_co_u32 s3, s3, s5
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX12DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 %in, i32 1)
store i64 %result, ptr addrspace(1) %out
@@ -1774,6 +1902,27 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: const_value_i64:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX12DAGISEL-NEXT: s_sub_co_i32 s2, 0, s2
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GFX12DAGISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
+; GFX12DAGISEL-NEXT: s_mul_i32 s5, s2, 0
+; GFX12DAGISEL-NEXT: s_mulk_i32 s3, 0x7b
+; GFX12DAGISEL-NEXT: s_add_co_u32 s4, s4, s5
+; GFX12DAGISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX12DAGISEL-NEXT: s_add_co_u32 s3, s4, s3
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 123, i32 1)
store i64 %result, ptr addrspace(1) %out
@@ -2026,6 +2175,27 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: poison_value_i64:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX12DAGISEL-NEXT: s_sub_co_i32 s2, 0, s2
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_mul_hi_u32 s4, s0, s2
+; GFX12DAGISEL-NEXT: s_mul_i32 s5, s1, s2
+; GFX12DAGISEL-NEXT: s_mul_i32 s3, s0, s3
+; GFX12DAGISEL-NEXT: s_add_co_u32 s4, s4, s5
+; GFX12DAGISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX12DAGISEL-NEXT: s_add_co_u32 s3, s4, s3
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX12DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 poison, i32 1)
store i64 %result, ptr addrspace(1) %out
@@ -2278,6 +2448,32 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132GISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX1132GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX1132GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12DAGISEL-LABEL: divergent_value_i64:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_expcnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_mov_b64 s[0:1], 0
+; GFX12DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX12DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX12DAGISEL-NEXT: s_wait_alu 0xfffe
+; GFX12DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX12DAGISEL-NEXT: s_wait_alu 0xfffe
+; GFX12DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX12DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX12DAGISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX12DAGISEL-NEXT: s_wait_alu 0xfffe
+; GFX12DAGISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX12DAGISEL-NEXT: s_sub_nc_u64 s[0:1], s[0:1], s[4:5]
+; GFX12DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX12DAGISEL-NEXT: ; %bb.2:
+; GFX12DAGISEL-NEXT: s_wait_alu 0xfffe
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX12DAGISEL-NEXT: s_setpc_b64 s[30:31]
entry:
%result = call i64 @llvm.amdgcn.wave.reduce.sub.i64(i64 %id.x, i32 1)
store i64 %result, ptr addrspace(1) %out
@@ -2860,6 +3056,60 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: divergent_cfg_i64:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_clause 0x1
+; GFX12DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX12DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12DAGISEL-NEXT: s_mov_b32 s8, exec_lo
+; GFX12DAGISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX12DAGISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX12DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX12DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_bcnt1_i32_b32 s6, s6
+; GFX12DAGISEL-NEXT: s_sub_co_i32 s6, 0, s6
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_ashr_i32 s7, s6, 31
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_mul_hi_u32 s9, s2, s6
+; GFX12DAGISEL-NEXT: s_mul_i32 s3, s3, s6
+; GFX12DAGISEL-NEXT: s_mul_i32 s7, s2, s7
+; GFX12DAGISEL-NEXT: s_add_co_u32 s3, s9, s3
+; GFX12DAGISEL-NEXT: s_mul_i32 s6, s2, s6
+; GFX12DAGISEL-NEXT: s_add_co_u32 s7, s3, s7
+; GFX12DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_or_saveexec_b32 s2, s8
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX12DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s2
+; GFX12DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_mov_b32 s3, exec_lo
+; GFX12DAGISEL-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: s_sub_co_i32 s3, 0, s3
+; GFX12DAGISEL-NEXT: s_ashr_i32 s6, s3, 31
+; GFX12DAGISEL-NEXT: s_mul_hi_u32 s7, s4, s3
+; GFX12DAGISEL-NEXT: s_mul_i32 s5, s5, s3
+; GFX12DAGISEL-NEXT: s_wait_alu 0xfffe
+; GFX12DAGISEL-NEXT: s_mul_i32 s6, s4, s6
+; GFX12DAGISEL-NEXT: s_add_co_u32 s5, s7, s5
+; GFX12DAGISEL-NEXT: s_mul_i32 s4, s4, s3
+; GFX12DAGISEL-NEXT: s_wait_alu 0xfffe
+; GFX12DAGISEL-NEXT: s_add_co_u32 s5, s5, s6
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX12DAGISEL-NEXT: ; %bb.4: ; %endif
+; GFX12DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX12DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%d_cmp = icmp ult i32 %tid, 16
>From abee4b51effe5a046508729e974caa4fb371ea6d Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Thu, 21 Aug 2025 14:58:43 +0530
Subject: [PATCH 8/9] Add helper function for expanding arithmetic ops.
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 146 +++++++++-------------
1 file changed, 58 insertions(+), 88 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 55117e09873e3..a85ad6e6ee822 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5270,6 +5270,58 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
return LoopBB;
}
+static MachineBasicBlock *Expand64BitScalarArithmetic(MachineInstr &MI,
+ MachineBasicBlock *BB) {
+ // For targets older than GFX12, we emit a sequence of 32-bit operations.
+ // For GFX12, we emit s_add_u64 and s_sub_u64.
+ MachineFunction *MF = BB->getParent();
+ const SIInstrInfo *TII = MF->getSubtarget<GCNSubtarget>().getInstrInfo();
+ SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
+ const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
+ MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+ const DebugLoc &DL = MI.getDebugLoc();
+ MachineOperand &Dest = MI.getOperand(0);
+ MachineOperand &Src0 = MI.getOperand(1);
+ MachineOperand &Src1 = MI.getOperand(2);
+ bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
+ if (ST.hasScalarAddSub64()) {
+ unsigned Opc = IsAdd ? AMDGPU::S_ADD_U64 : AMDGPU::S_SUB_U64;
+ // clang-format off
+ BuildMI(*BB, MI, DL, TII->get(Opc), Dest.getReg())
+ .add(Src0)
+ .add(Src1);
+ // clang-format on
+ } else {
+ const SIRegisterInfo *TRI = ST.getRegisterInfo();
+ const TargetRegisterClass *BoolRC = TRI->getBoolRC();
+
+ Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+
+ MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(
+ MI, MRI, Src0, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass);
+ MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(
+ MI, MRI, Src0, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass);
+
+ MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(
+ MI, MRI, Src1, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass);
+ MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(
+ MI, MRI, Src1, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass);
+
+ unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
+ unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
+ BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0).add(Src0Sub0).add(Src1Sub0);
+ BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1).add(Src0Sub1).add(Src1Sub1);
+ BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
+ .addReg(DestSub0)
+ .addImm(AMDGPU::sub0)
+ .addReg(DestSub1)
+ .addImm(AMDGPU::sub1);
+ }
+ MI.eraseFromParent();
+ return BB;
+}
+
static uint32_t getIdentityValueFor32BitWaveReduction(unsigned Opc) {
switch (Opc) {
case AMDGPU::S_MIN_U32:
@@ -5641,43 +5693,10 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
}
case AMDGPU::S_ADD_U64_PSEUDO:
case AMDGPU::S_SUB_U64_PSEUDO: {
- if (ST.hasScalarAddSub64()) {
- NewAccumulator = BuildMI(*ComputeLoop, I, DL,
- TII->get(Opc == AMDGPU::S_ADD_U64_PSEUDO
- ? AMDGPU::S_ADD_U64
- : AMDGPU::S_SUB_U64),
- DstReg)
- .addReg(Accumulator->getOperand(0).getReg())
- .addReg(LaneValue->getOperand(0).getReg());
- } else {
- unsigned NewOpc1 = Opc == AMDGPU::S_ADD_U64_PSEUDO
- ? AMDGPU::S_ADD_U32
- : AMDGPU::S_SUB_U32;
- unsigned NewOpc2 = Opc == AMDGPU::S_ADD_U64_PSEUDO
- ? AMDGPU::S_ADDC_U32
- : AMDGPU::S_SUBB_U32;
- Register DestLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
- Register DestHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
- MachineOperand Accumlo = TII->buildExtractSubRegOrImm(
- MI, MRI, Accumulator->getOperand(0), DstRegClass, AMDGPU::sub0,
- &AMDGPU::SReg_32RegClass);
- MachineOperand Accumhi = TII->buildExtractSubRegOrImm(
- MI, MRI, Accumulator->getOperand(0), DstRegClass, AMDGPU::sub1,
- &AMDGPU::SReg_32RegClass);
- BuildMI(*ComputeLoop, I, DL, TII->get(NewOpc1), DestLo)
- .add(Accumlo)
- .addReg(LaneValueLo->getOperand(0).getReg());
- BuildMI(*ComputeLoop, I, DL, TII->get(NewOpc2), DestHi)
- .add(Accumhi)
- .addReg(LaneValueHi->getOperand(0).getReg())
- .setOperandDead(3); // Dead scc
- NewAccumulator = BuildMI(*ComputeLoop, I, DL,
- TII->get(TargetOpcode::REG_SEQUENCE), DstReg)
- .addReg(DestLo)
- .addImm(AMDGPU::sub0)
- .addReg(DestHi)
- .addImm(AMDGPU::sub1);
- }
+ NewAccumulator = BuildMI(*ComputeLoop, I, DL, TII->get(Opc), DstReg)
+ .addReg(Accumulator->getOperand(0).getReg())
+ .addReg(LaneValue->getOperand(0).getReg());
+ ComputeLoop = Expand64BitScalarArithmetic(*NewAccumulator, ComputeLoop);
break;
}
}
@@ -5690,8 +5709,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
.addReg(ActiveBitsReg);
// Add phi nodes
- Accumulator.addReg(NewAccumulator->getOperand(0).getReg())
- .addMBB(ComputeLoop);
+ Accumulator.addReg(DstReg).addMBB(ComputeLoop);
ActiveBits.addReg(NewActiveBitsReg).addMBB(ComputeLoop);
// Creating branching
@@ -5773,55 +5791,7 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
}
case AMDGPU::S_ADD_U64_PSEUDO:
case AMDGPU::S_SUB_U64_PSEUDO: {
- // For targets older than GFX12, we emit a sequence of 32-bit operations.
- // For GFX12, we emit s_add_u64 and s_sub_u64.
- const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
- MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
- const DebugLoc &DL = MI.getDebugLoc();
- MachineOperand &Dest = MI.getOperand(0);
- MachineOperand &Src0 = MI.getOperand(1);
- MachineOperand &Src1 = MI.getOperand(2);
- bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
- if (Subtarget->hasScalarAddSub64()) {
- unsigned Opc = IsAdd ? AMDGPU::S_ADD_U64 : AMDGPU::S_SUB_U64;
- // clang-format off
- BuildMI(*BB, MI, DL, TII->get(Opc), Dest.getReg())
- .add(Src0)
- .add(Src1);
- // clang-format on
- } else {
- const SIRegisterInfo *TRI = ST.getRegisterInfo();
- const TargetRegisterClass *BoolRC = TRI->getBoolRC();
-
- Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
- Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
-
- MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(
- MI, MRI, Src0, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass);
- MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(
- MI, MRI, Src0, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass);
-
- MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(
- MI, MRI, Src1, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass);
- MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(
- MI, MRI, Src1, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass);
-
- unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
- unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
- BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
- .add(Src0Sub0)
- .add(Src1Sub0);
- BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
- .add(Src0Sub1)
- .add(Src1Sub1);
- BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
- .addReg(DestSub0)
- .addImm(AMDGPU::sub0)
- .addReg(DestSub1)
- .addImm(AMDGPU::sub1);
- }
- MI.eraseFromParent();
- return BB;
+ return Expand64BitScalarArithmetic(MI, BB);
}
case AMDGPU::V_ADD_U64_PSEUDO:
case AMDGPU::V_SUB_U64_PSEUDO: {
>From 5799dbde15b7000b9bd8f596b792019a4b2e2123 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Wed, 10 Sep 2025 14:41:18 +0530
Subject: [PATCH 9/9] Remove unused variable
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 10 -
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll | 188 -----------
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll | 188 -----------
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll | 296 ------------------
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll | 172 ----------
5 files changed, 854 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index a85ad6e6ee822..4bb189e1cf09b 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5276,7 +5276,6 @@ static MachineBasicBlock *Expand64BitScalarArithmetic(MachineInstr &MI,
// For GFX12, we emit s_add_u64 and s_sub_u64.
MachineFunction *MF = BB->getParent();
const SIInstrInfo *TII = MF->getSubtarget<GCNSubtarget>().getInstrInfo();
- SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
const DebugLoc &DL = MI.getDebugLoc();
@@ -5408,15 +5407,6 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
RetBB = &BB;
break;
}
- case AMDGPU::V_CMP_LT_U64_e64: // umin
- case AMDGPU::V_CMP_LT_I64_e64: // min
- case AMDGPU::V_CMP_GT_U64_e64: // umax
- case AMDGPU::V_CMP_GT_I64_e64: { // max
- // Idempotent operations.
- BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B64), DstReg).addReg(SrcReg);
- RetBB = &BB;
- break;
- }
case AMDGPU::S_XOR_B32:
case AMDGPU::S_ADD_I32:
case AMDGPU::S_ADD_U64_PSEUDO:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
index f381a82660491..ace65a03a5abb 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
@@ -1269,21 +1269,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8DAGISEL-LABEL: divergent_value_i64:
; GFX8DAGISEL: ; %bb.0: ; %entry
; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
; GFX8DAGISEL-NEXT: s_mov_b32 s4, 0
; GFX8DAGISEL-NEXT: s_brev_b32 s5, 1
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-=======
-; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX8DAGISEL-NEXT: s_brev_b32 s5, 1
-; GFX8DAGISEL-NEXT: s_mov_b32 s4, 0
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX8DAGISEL-NEXT: s_mov_b32 s4, 0
-; GFX8DAGISEL-NEXT: s_brev_b32 s5, 1
-; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1306,21 +1294,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8GISEL-LABEL: divergent_value_i64:
; GFX8GISEL: ; %bb.0: ; %entry
; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX8GISEL-NEXT: s_mov_b32 s4, 0
-; GFX8GISEL-NEXT: s_brev_b32 s5, 1
-; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
-=======
-; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX8GISEL-NEXT: s_brev_b32 s5, 1
-; GFX8GISEL-NEXT: s_mov_b32 s4, 0
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX8GISEL-NEXT: s_mov_b32 s4, 0
; GFX8GISEL-NEXT: s_brev_b32 s5, 1
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX8GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1343,21 +1319,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9DAGISEL-LABEL: divergent_value_i64:
; GFX9DAGISEL: ; %bb.0: ; %entry
; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX9DAGISEL-NEXT: s_mov_b32 s4, 0
-; GFX9DAGISEL-NEXT: s_brev_b32 s5, 1
-; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-=======
-; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX9DAGISEL-NEXT: s_brev_b32 s5, 1
-; GFX9DAGISEL-NEXT: s_mov_b32 s4, 0
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX9DAGISEL-NEXT: s_mov_b32 s4, 0
; GFX9DAGISEL-NEXT: s_brev_b32 s5, 1
; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1380,21 +1344,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9GISEL-LABEL: divergent_value_i64:
; GFX9GISEL: ; %bb.0: ; %entry
; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX9GISEL-NEXT: s_mov_b32 s4, 0
-; GFX9GISEL-NEXT: s_brev_b32 s5, 1
-; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
-=======
-; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX9GISEL-NEXT: s_brev_b32 s5, 1
-; GFX9GISEL-NEXT: s_mov_b32 s4, 0
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX9GISEL-NEXT: s_mov_b32 s4, 0
; GFX9GISEL-NEXT: s_brev_b32 s5, 1
; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX9GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1417,21 +1369,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064DAGISEL-LABEL: divergent_value_i64:
; GFX1064DAGISEL: ; %bb.0: ; %entry
; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1064DAGISEL-NEXT: s_mov_b32 s4, 0
-; GFX1064DAGISEL-NEXT: s_brev_b32 s5, 1
-; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-=======
-; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX1064DAGISEL-NEXT: s_brev_b32 s5, 1
-; GFX1064DAGISEL-NEXT: s_mov_b32 s4, 0
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX1064DAGISEL-NEXT: s_mov_b32 s4, 0
; GFX1064DAGISEL-NEXT: s_brev_b32 s5, 1
; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1453,21 +1393,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064GISEL-LABEL: divergent_value_i64:
; GFX1064GISEL: ; %bb.0: ; %entry
; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
; GFX1064GISEL-NEXT: s_mov_b32 s4, 0
; GFX1064GISEL-NEXT: s_brev_b32 s5, 1
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
-=======
-; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX1064GISEL-NEXT: s_brev_b32 s5, 1
-; GFX1064GISEL-NEXT: s_mov_b32 s4, 0
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX1064GISEL-NEXT: s_mov_b32 s4, 0
-; GFX1064GISEL-NEXT: s_brev_b32 s5, 1
-; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX1064GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1489,21 +1417,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032DAGISEL-LABEL: divergent_value_i64:
; GFX1032DAGISEL: ; %bb.0: ; %entry
; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1032DAGISEL-NEXT: s_mov_b32 s4, 0
-; GFX1032DAGISEL-NEXT: s_brev_b32 s5, 1
-; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
-=======
-; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
-; GFX1032DAGISEL-NEXT: s_brev_b32 s5, 1
-; GFX1032DAGISEL-NEXT: s_mov_b32 s4, 0
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX1032DAGISEL-NEXT: s_mov_b32 s4, 0
; GFX1032DAGISEL-NEXT: s_brev_b32 s5, 1
; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1525,21 +1441,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032GISEL-LABEL: divergent_value_i64:
; GFX1032GISEL: ; %bb.0: ; %entry
; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1032GISEL-NEXT: s_mov_b32 s4, 0
-; GFX1032GISEL-NEXT: s_brev_b32 s5, 1
-; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
-=======
-; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
-; GFX1032GISEL-NEXT: s_brev_b32 s5, 1
-; GFX1032GISEL-NEXT: s_mov_b32 s4, 0
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX1032GISEL-NEXT: s_mov_b32 s4, 0
; GFX1032GISEL-NEXT: s_brev_b32 s5, 1
; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1561,39 +1465,16 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164DAGISEL-LABEL: divergent_value_i64:
; GFX1164DAGISEL: ; %bb.0: ; %entry
; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1164DAGISEL-NEXT: s_mov_b32 s0, 0
-; GFX1164DAGISEL-NEXT: s_brev_b32 s1, 1
-; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
-=======
-; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT: s_brev_b32 s1, 1
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164DAGISEL-NEXT: s_mov_b32 s0, 0
; GFX1164DAGISEL-NEXT: s_brev_b32 s1, 1
; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v4, s0
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v5, s1
; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s8
; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v3, s8
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164DAGISEL-NEXT: v_cmp_gt_i64_e32 vcc, s[4:5], v[4:5]
; GFX1164DAGISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s8
@@ -1609,39 +1490,16 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164GISEL-LABEL: divergent_value_i64:
; GFX1164GISEL: ; %bb.0: ; %entry
; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1164GISEL-NEXT: s_mov_b32 s0, 0
-; GFX1164GISEL-NEXT: s_brev_b32 s1, 1
-; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
-=======
-; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT: s_brev_b32 s1, 1
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164GISEL-NEXT: s_mov_b32 s0, 0
; GFX1164GISEL-NEXT: s_brev_b32 s1, 1
; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164GISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164GISEL-NEXT: v_mov_b32_e32 v4, s0
; GFX1164GISEL-NEXT: v_mov_b32_e32 v5, s1
; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s8
; GFX1164GISEL-NEXT: v_readlane_b32 s5, v3, s8
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164GISEL-NEXT: v_cmp_gt_i64_e32 vcc, s[4:5], v[4:5]
; GFX1164GISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s8
@@ -1657,38 +1515,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132DAGISEL-LABEL: divergent_value_i64:
; GFX1132DAGISEL: ; %bb.0: ; %entry
; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1132DAGISEL-NEXT: s_mov_b32 s0, 0
-; GFX1132DAGISEL-NEXT: s_brev_b32 s1, 1
-; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-=======
-; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT: s_brev_b32 s1, 1
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132DAGISEL-NEXT: s_mov_b32 s0, 0
; GFX1132DAGISEL-NEXT: s_brev_b32 s1, 1
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132DAGISEL-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[4:5], v[4:5]
; GFX1132DAGISEL-NEXT: s_and_b32 s6, vcc_lo, s2
; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
@@ -1703,38 +1538,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132GISEL-LABEL: divergent_value_i64:
; GFX1132GISEL: ; %bb.0: ; %entry
; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1132GISEL-NEXT: s_mov_b32 s0, 0
-; GFX1132GISEL-NEXT: s_brev_b32 s1, 1
-; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-=======
-; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT: s_brev_b32 s1, 1
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132GISEL-NEXT: s_mov_b32 s0, 0
; GFX1132GISEL-NEXT: s_brev_b32 s1, 1
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132GISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132GISEL-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[4:5], v[4:5]
; GFX1132GISEL-NEXT: s_and_b32 s6, vcc_lo, s2
; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
index 7a83d7fa5ced2..b12537eb0cebe 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
@@ -1269,21 +1269,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8DAGISEL-LABEL: divergent_value_i64:
; GFX8DAGISEL: ; %bb.0: ; %entry
; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
; GFX8DAGISEL-NEXT: s_mov_b32 s4, -1
; GFX8DAGISEL-NEXT: s_brev_b32 s5, -2
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-=======
-; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX8DAGISEL-NEXT: s_brev_b32 s5, -2
-; GFX8DAGISEL-NEXT: s_mov_b32 s4, -1
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX8DAGISEL-NEXT: s_mov_b32 s4, -1
-; GFX8DAGISEL-NEXT: s_brev_b32 s5, -2
-; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1306,21 +1294,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8GISEL-LABEL: divergent_value_i64:
; GFX8GISEL: ; %bb.0: ; %entry
; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX8GISEL-NEXT: s_mov_b32 s4, -1
-; GFX8GISEL-NEXT: s_brev_b32 s5, -2
-; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
-=======
-; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX8GISEL-NEXT: s_brev_b32 s5, -2
-; GFX8GISEL-NEXT: s_mov_b32 s4, -1
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX8GISEL-NEXT: s_mov_b32 s4, -1
; GFX8GISEL-NEXT: s_brev_b32 s5, -2
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX8GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1343,21 +1319,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9DAGISEL-LABEL: divergent_value_i64:
; GFX9DAGISEL: ; %bb.0: ; %entry
; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX9DAGISEL-NEXT: s_mov_b32 s4, -1
-; GFX9DAGISEL-NEXT: s_brev_b32 s5, -2
-; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-=======
-; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX9DAGISEL-NEXT: s_brev_b32 s5, -2
-; GFX9DAGISEL-NEXT: s_mov_b32 s4, -1
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX9DAGISEL-NEXT: s_mov_b32 s4, -1
; GFX9DAGISEL-NEXT: s_brev_b32 s5, -2
; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1380,21 +1344,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9GISEL-LABEL: divergent_value_i64:
; GFX9GISEL: ; %bb.0: ; %entry
; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX9GISEL-NEXT: s_mov_b32 s4, -1
-; GFX9GISEL-NEXT: s_brev_b32 s5, -2
-; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
-=======
-; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX9GISEL-NEXT: s_brev_b32 s5, -2
-; GFX9GISEL-NEXT: s_mov_b32 s4, -1
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX9GISEL-NEXT: s_mov_b32 s4, -1
; GFX9GISEL-NEXT: s_brev_b32 s5, -2
; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX9GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1417,21 +1369,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064DAGISEL-LABEL: divergent_value_i64:
; GFX1064DAGISEL: ; %bb.0: ; %entry
; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1064DAGISEL-NEXT: s_mov_b32 s4, -1
-; GFX1064DAGISEL-NEXT: s_brev_b32 s5, -2
-; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-=======
-; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX1064DAGISEL-NEXT: s_brev_b32 s5, -2
-; GFX1064DAGISEL-NEXT: s_mov_b32 s4, -1
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX1064DAGISEL-NEXT: s_mov_b32 s4, -1
; GFX1064DAGISEL-NEXT: s_brev_b32 s5, -2
; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1453,21 +1393,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064GISEL-LABEL: divergent_value_i64:
; GFX1064GISEL: ; %bb.0: ; %entry
; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
; GFX1064GISEL-NEXT: s_mov_b32 s4, -1
; GFX1064GISEL-NEXT: s_brev_b32 s5, -2
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
-=======
-; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX1064GISEL-NEXT: s_brev_b32 s5, -2
-; GFX1064GISEL-NEXT: s_mov_b32 s4, -1
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX1064GISEL-NEXT: s_mov_b32 s4, -1
-; GFX1064GISEL-NEXT: s_brev_b32 s5, -2
-; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX1064GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1489,21 +1417,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032DAGISEL-LABEL: divergent_value_i64:
; GFX1032DAGISEL: ; %bb.0: ; %entry
; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1032DAGISEL-NEXT: s_mov_b32 s4, -1
-; GFX1032DAGISEL-NEXT: s_brev_b32 s5, -2
-; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
-=======
-; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
-; GFX1032DAGISEL-NEXT: s_brev_b32 s5, -2
-; GFX1032DAGISEL-NEXT: s_mov_b32 s4, -1
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX1032DAGISEL-NEXT: s_mov_b32 s4, -1
; GFX1032DAGISEL-NEXT: s_brev_b32 s5, -2
; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1525,21 +1441,9 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032GISEL-LABEL: divergent_value_i64:
; GFX1032GISEL: ; %bb.0: ; %entry
; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1032GISEL-NEXT: s_mov_b32 s4, -1
-; GFX1032GISEL-NEXT: s_brev_b32 s5, -2
-; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
-=======
-; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
-; GFX1032GISEL-NEXT: s_brev_b32 s5, -2
-; GFX1032GISEL-NEXT: s_mov_b32 s4, -1
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX1032GISEL-NEXT: s_mov_b32 s4, -1
; GFX1032GISEL-NEXT: s_brev_b32 s5, -2
; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1561,39 +1465,16 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164DAGISEL-LABEL: divergent_value_i64:
; GFX1164DAGISEL: ; %bb.0: ; %entry
; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1164DAGISEL-NEXT: s_mov_b32 s0, -1
-; GFX1164DAGISEL-NEXT: s_brev_b32 s1, -2
-; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
-=======
-; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT: s_brev_b32 s1, -2
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164DAGISEL-NEXT: s_mov_b32 s0, -1
; GFX1164DAGISEL-NEXT: s_brev_b32 s1, -2
; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v4, s0
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v5, s1
; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s8
; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v3, s8
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164DAGISEL-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[4:5]
; GFX1164DAGISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s8
@@ -1609,39 +1490,16 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164GISEL-LABEL: divergent_value_i64:
; GFX1164GISEL: ; %bb.0: ; %entry
; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1164GISEL-NEXT: s_mov_b32 s0, -1
-; GFX1164GISEL-NEXT: s_brev_b32 s1, -2
-; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
-=======
-; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT: s_brev_b32 s1, -2
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164GISEL-NEXT: s_mov_b32 s0, -1
; GFX1164GISEL-NEXT: s_brev_b32 s1, -2
; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164GISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164GISEL-NEXT: v_mov_b32_e32 v4, s0
; GFX1164GISEL-NEXT: v_mov_b32_e32 v5, s1
; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s8
; GFX1164GISEL-NEXT: v_readlane_b32 s5, v3, s8
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164GISEL-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[4:5]
; GFX1164GISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s8
@@ -1657,38 +1515,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132DAGISEL-LABEL: divergent_value_i64:
; GFX1132DAGISEL: ; %bb.0: ; %entry
; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1132DAGISEL-NEXT: s_mov_b32 s0, -1
-; GFX1132DAGISEL-NEXT: s_brev_b32 s1, -2
-; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-=======
-; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT: s_brev_b32 s1, -2
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132DAGISEL-NEXT: s_mov_b32 s0, -1
; GFX1132DAGISEL-NEXT: s_brev_b32 s1, -2
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132DAGISEL-NEXT: v_cmp_lt_i64_e32 vcc_lo, s[4:5], v[4:5]
; GFX1132DAGISEL-NEXT: s_and_b32 s6, vcc_lo, s2
; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
@@ -1703,38 +1538,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132GISEL-LABEL: divergent_value_i64:
; GFX1132GISEL: ; %bb.0: ; %entry
; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1132GISEL-NEXT: s_mov_b32 s0, -1
-; GFX1132GISEL-NEXT: s_brev_b32 s1, -2
-; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-=======
-; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT: s_brev_b32 s1, -2
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132GISEL-NEXT: s_mov_b32 s0, -1
; GFX1132GISEL-NEXT: s_brev_b32 s1, -2
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132GISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132GISEL-NEXT: v_cmp_lt_i64_e32 vcc_lo, s[4:5], v[4:5]
; GFX1132GISEL-NEXT: s_and_b32 s6, vcc_lo, s2
; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
index 3fc5bb103873b..1f848d577d2a4 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
@@ -1194,8 +1194,6 @@ entry:
ret void
}
-<<<<<<< HEAD
-<<<<<<< HEAD
define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8DAGISEL-LABEL: divergent_value_i64:
; GFX8DAGISEL: ; %bb.0: ; %entry
@@ -1476,300 +1474,6 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132GISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX1132GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX1132GISEL-NEXT: s_setpc_b64 s[30:31]
-=======
-define amdgpu_kernel void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
-=======
-define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
-; GFX8DAGISEL-LABEL: divergent_value_i64:
-; GFX8DAGISEL: ; %bb.0: ; %entry
-; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8DAGISEL-NEXT: s_mov_b64 s[4:5], 0
-; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
-; GFX8DAGISEL-NEXT: v_mov_b32_e32 v4, s4
-; GFX8DAGISEL-NEXT: v_mov_b32_e32 v5, s5
-; GFX8DAGISEL-NEXT: v_readlane_b32 s8, v2, s12
-; GFX8DAGISEL-NEXT: v_readlane_b32 s9, v3, s12
-; GFX8DAGISEL-NEXT: v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
-; GFX8DAGISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
-; GFX8DAGISEL-NEXT: s_bitset0_b64 s[6:7], s12
-; GFX8DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
-; GFX8DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
-; GFX8DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
-; GFX8DAGISEL-NEXT: ; %bb.2:
-; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s4
-; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s5
-; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
-; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0)
-; GFX8DAGISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX8GISEL-LABEL: divergent_value_i64:
-; GFX8GISEL: ; %bb.0: ; %entry
-; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8GISEL-NEXT: s_mov_b64 s[4:5], 0
-; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX8GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
-; GFX8GISEL-NEXT: v_mov_b32_e32 v4, s4
-; GFX8GISEL-NEXT: v_mov_b32_e32 v5, s5
-; GFX8GISEL-NEXT: v_readlane_b32 s8, v2, s12
-; GFX8GISEL-NEXT: v_readlane_b32 s9, v3, s12
-; GFX8GISEL-NEXT: v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
-; GFX8GISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
-; GFX8GISEL-NEXT: s_bitset0_b64 s[6:7], s12
-; GFX8GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
-; GFX8GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
-; GFX8GISEL-NEXT: s_cbranch_scc1 .LBB8_1
-; GFX8GISEL-NEXT: ; %bb.2:
-; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s4
-; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s5
-; GFX8GISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
-; GFX8GISEL-NEXT: s_waitcnt vmcnt(0)
-; GFX8GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9DAGISEL-LABEL: divergent_value_i64:
-; GFX9DAGISEL: ; %bb.0: ; %entry
-; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], 0
-; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
-; GFX9DAGISEL-NEXT: v_mov_b32_e32 v4, s4
-; GFX9DAGISEL-NEXT: v_mov_b32_e32 v5, s5
-; GFX9DAGISEL-NEXT: v_readlane_b32 s8, v2, s12
-; GFX9DAGISEL-NEXT: v_readlane_b32 s9, v3, s12
-; GFX9DAGISEL-NEXT: v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
-; GFX9DAGISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
-; GFX9DAGISEL-NEXT: s_bitset0_b64 s[6:7], s12
-; GFX9DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
-; GFX9DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
-; GFX9DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
-; GFX9DAGISEL-NEXT: ; %bb.2:
-; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, s4
-; GFX9DAGISEL-NEXT: v_mov_b32_e32 v3, s5
-; GFX9DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
-; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0)
-; GFX9DAGISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9GISEL-LABEL: divergent_value_i64:
-; GFX9GISEL: ; %bb.0: ; %entry
-; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9GISEL-NEXT: s_mov_b64 s[4:5], 0
-; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX9GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
-; GFX9GISEL-NEXT: v_mov_b32_e32 v4, s4
-; GFX9GISEL-NEXT: v_mov_b32_e32 v5, s5
-; GFX9GISEL-NEXT: v_readlane_b32 s8, v2, s12
-; GFX9GISEL-NEXT: v_readlane_b32 s9, v3, s12
-; GFX9GISEL-NEXT: v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
-; GFX9GISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
-; GFX9GISEL-NEXT: s_bitset0_b64 s[6:7], s12
-; GFX9GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
-; GFX9GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
-; GFX9GISEL-NEXT: s_cbranch_scc1 .LBB8_1
-; GFX9GISEL-NEXT: ; %bb.2:
-; GFX9GISEL-NEXT: v_mov_b32_e32 v2, s4
-; GFX9GISEL-NEXT: v_mov_b32_e32 v3, s5
-; GFX9GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
-; GFX9GISEL-NEXT: s_waitcnt vmcnt(0)
-; GFX9GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1064DAGISEL-LABEL: divergent_value_i64:
-; GFX1064DAGISEL: ; %bb.0: ; %entry
-; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], 0
-; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
-; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v4, s4
-; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v5, s5
-; GFX1064DAGISEL-NEXT: v_readlane_b32 s8, v2, s12
-; GFX1064DAGISEL-NEXT: v_readlane_b32 s9, v3, s12
-; GFX1064DAGISEL-NEXT: v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
-; GFX1064DAGISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
-; GFX1064DAGISEL-NEXT: s_bitset0_b64 s[6:7], s12
-; GFX1064DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
-; GFX1064DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
-; GFX1064DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
-; GFX1064DAGISEL-NEXT: ; %bb.2:
-; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, s4
-; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v3, s5
-; GFX1064DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
-; GFX1064DAGISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1064GISEL-LABEL: divergent_value_i64:
-; GFX1064GISEL: ; %bb.0: ; %entry
-; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], 0
-; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1064GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
-; GFX1064GISEL-NEXT: v_mov_b32_e32 v4, s4
-; GFX1064GISEL-NEXT: v_mov_b32_e32 v5, s5
-; GFX1064GISEL-NEXT: v_readlane_b32 s8, v2, s12
-; GFX1064GISEL-NEXT: v_readlane_b32 s9, v3, s12
-; GFX1064GISEL-NEXT: v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
-; GFX1064GISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
-; GFX1064GISEL-NEXT: s_bitset0_b64 s[6:7], s12
-; GFX1064GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
-; GFX1064GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
-; GFX1064GISEL-NEXT: s_cbranch_scc1 .LBB8_1
-; GFX1064GISEL-NEXT: ; %bb.2:
-; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, s4
-; GFX1064GISEL-NEXT: v_mov_b32_e32 v3, s5
-; GFX1064GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
-; GFX1064GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1032DAGISEL-LABEL: divergent_value_i64:
-; GFX1032DAGISEL: ; %bb.0: ; %entry
-; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1032DAGISEL-NEXT: s_mov_b64 s[4:5], 0
-; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
-; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
-; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v4, s4
-; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v5, s5
-; GFX1032DAGISEL-NEXT: v_readlane_b32 s8, v2, s7
-; GFX1032DAGISEL-NEXT: v_readlane_b32 s9, v3, s7
-; GFX1032DAGISEL-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[8:9], v[4:5]
-; GFX1032DAGISEL-NEXT: s_and_b32 s10, vcc_lo, s6
-; GFX1032DAGISEL-NEXT: s_bitset0_b32 s6, s7
-; GFX1032DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
-; GFX1032DAGISEL-NEXT: s_cmp_lg_u32 s6, 0
-; GFX1032DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
-; GFX1032DAGISEL-NEXT: ; %bb.2:
-; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, s4
-; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v3, s5
-; GFX1032DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
-; GFX1032DAGISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1032GISEL-LABEL: divergent_value_i64:
-; GFX1032GISEL: ; %bb.0: ; %entry
-; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1032GISEL-NEXT: s_mov_b64 s[4:5], 0
-; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
-; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
-; GFX1032GISEL-NEXT: v_mov_b32_e32 v4, s4
-; GFX1032GISEL-NEXT: v_mov_b32_e32 v5, s5
-; GFX1032GISEL-NEXT: v_readlane_b32 s8, v2, s7
-; GFX1032GISEL-NEXT: v_readlane_b32 s9, v3, s7
-; GFX1032GISEL-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[8:9], v[4:5]
-; GFX1032GISEL-NEXT: s_and_b32 s10, vcc_lo, s6
-; GFX1032GISEL-NEXT: s_bitset0_b32 s6, s7
-; GFX1032GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
-; GFX1032GISEL-NEXT: s_cmp_lg_u32 s6, 0
-; GFX1032GISEL-NEXT: s_cbranch_scc1 .LBB8_1
-; GFX1032GISEL-NEXT: ; %bb.2:
-; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, s4
-; GFX1032GISEL-NEXT: v_mov_b32_e32 v3, s5
-; GFX1032GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
-; GFX1032GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1164DAGISEL-LABEL: divergent_value_i64:
-; GFX1164DAGISEL: ; %bb.0: ; %entry
-; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1164DAGISEL-NEXT: s_mov_b64 s[0:1], 0
-; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
-; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
-; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v4, s0
-; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v5, s1
-; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s8
-; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v3, s8
-; GFX1164DAGISEL-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[4:5]
-; GFX1164DAGISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
-; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s8
-; GFX1164DAGISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
-; GFX1164DAGISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
-; GFX1164DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
-; GFX1164DAGISEL-NEXT: ; %bb.2:
-; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v3, s1
-; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, s0
-; GFX1164DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
-; GFX1164DAGISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1164GISEL-LABEL: divergent_value_i64:
-; GFX1164GISEL: ; %bb.0: ; %entry
-; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1164GISEL-NEXT: s_mov_b64 s[0:1], 0
-; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
-; GFX1164GISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
-; GFX1164GISEL-NEXT: v_mov_b32_e32 v4, s0
-; GFX1164GISEL-NEXT: v_mov_b32_e32 v5, s1
-; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s8
-; GFX1164GISEL-NEXT: v_readlane_b32 s5, v3, s8
-; GFX1164GISEL-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[4:5]
-; GFX1164GISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
-; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s8
-; GFX1164GISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
-; GFX1164GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
-; GFX1164GISEL-NEXT: s_cbranch_scc1 .LBB8_1
-; GFX1164GISEL-NEXT: ; %bb.2:
-; GFX1164GISEL-NEXT: v_mov_b32_e32 v3, s1
-; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, s0
-; GFX1164GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
-; GFX1164GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1132DAGISEL-LABEL: divergent_value_i64:
-; GFX1132DAGISEL: ; %bb.0: ; %entry
-; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1132DAGISEL-NEXT: s_mov_b64 s[0:1], 0
-; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
-; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
-; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
-; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
-; GFX1132DAGISEL-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[4:5], v[4:5]
-; GFX1132DAGISEL-NEXT: s_and_b32 s6, vcc_lo, s2
-; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
-; GFX1132DAGISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
-; GFX1132DAGISEL-NEXT: s_cmp_lg_u32 s2, 0
-; GFX1132DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
-; GFX1132DAGISEL-NEXT: ; %bb.2:
-; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
-; GFX1132DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
-; GFX1132DAGISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1132GISEL-LABEL: divergent_value_i64:
-; GFX1132GISEL: ; %bb.0: ; %entry
-<<<<<<< HEAD
-; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
-; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
-; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
-; GFX1132GISEL-NEXT: s_endpgm
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1132GISEL-NEXT: s_mov_b64 s[0:1], 0
-; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
-; GFX1132GISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
-; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
-; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
-; GFX1132GISEL-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[4:5], v[4:5]
-; GFX1132GISEL-NEXT: s_and_b32 s6, vcc_lo, s2
-; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
-; GFX1132GISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
-; GFX1132GISEL-NEXT: s_cmp_lg_u32 s2, 0
-; GFX1132GISEL-NEXT: s_cbranch_scc1 .LBB8_1
-; GFX1132GISEL-NEXT: ; %bb.2:
-; GFX1132GISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
-; GFX1132GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
-; GFX1132GISEL-NEXT: s_setpc_b64 s[30:31]
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
entry:
%result = call i64 @llvm.amdgcn.wave.reduce.umax.i64(i64 %id.x, i32 1)
store i64 %result, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
index 29412bc1ed31d..c2cfb8828c30c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
@@ -1198,19 +1198,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8DAGISEL-LABEL: divergent_value_i64:
; GFX8DAGISEL: ; %bb.0: ; %entry
; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
; GFX8DAGISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-=======
-; GFX8DAGISEL-NEXT: s_mov_b32 s4, -1
-; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX8DAGISEL-NEXT: s_mov_b32 s5, s4
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX8DAGISEL-NEXT: s_mov_b64 s[4:5], -1
-; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1233,19 +1222,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX8GISEL-LABEL: divergent_value_i64:
; GFX8GISEL: ; %bb.0: ; %entry
; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
; GFX8GISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
-=======
-; GFX8GISEL-NEXT: s_mov_b32 s4, -1
-; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX8GISEL-NEXT: s_mov_b32 s5, s4
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX8GISEL-NEXT: s_mov_b64 s[4:5], -1
-; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX8GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX8GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1268,19 +1246,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9DAGISEL-LABEL: divergent_value_i64:
; GFX9DAGISEL: ; %bb.0: ; %entry
; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], -1
-; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-=======
-; GFX9DAGISEL-NEXT: s_mov_b32 s4, -1
-; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX9DAGISEL-NEXT: s_mov_b32 s5, s4
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1303,19 +1270,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX9GISEL-LABEL: divergent_value_i64:
; GFX9GISEL: ; %bb.0: ; %entry
; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX9GISEL-NEXT: s_mov_b64 s[4:5], -1
-; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
-=======
-; GFX9GISEL-NEXT: s_mov_b32 s4, -1
-; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX9GISEL-NEXT: s_mov_b32 s5, s4
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX9GISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX9GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX9GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1338,19 +1294,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064DAGISEL-LABEL: divergent_value_i64:
; GFX1064DAGISEL: ; %bb.0: ; %entry
; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], -1
-; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-=======
-; GFX1064DAGISEL-NEXT: s_mov_b32 s4, -1
-; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX1064DAGISEL-NEXT: s_mov_b32 s5, s4
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1372,19 +1317,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1064GISEL-LABEL: divergent_value_i64:
; GFX1064GISEL: ; %bb.0: ; %entry
; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
-=======
-; GFX1064GISEL-NEXT: s_mov_b32 s4, -1
-; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX1064GISEL-NEXT: s_mov_b32 s5, s4
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], -1
-; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1064GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
; GFX1064GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1406,19 +1340,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032DAGISEL-LABEL: divergent_value_i64:
; GFX1032DAGISEL: ; %bb.0: ; %entry
; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
; GFX1032DAGISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
-=======
-; GFX1032DAGISEL-NEXT: s_mov_b32 s4, -1
-; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
-; GFX1032DAGISEL-NEXT: s_mov_b32 s5, s4
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX1032DAGISEL-NEXT: s_mov_b64 s[4:5], -1
-; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1440,19 +1363,8 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1032GISEL-LABEL: divergent_value_i64:
; GFX1032GISEL: ; %bb.0: ; %entry
; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1032GISEL-NEXT: s_mov_b64 s[4:5], -1
-; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
-=======
-; GFX1032GISEL-NEXT: s_mov_b32 s4, -1
-; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
-; GFX1032GISEL-NEXT: s_mov_b32 s5, s4
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX1032GISEL-NEXT: s_mov_b64 s[4:5], -1
; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
; GFX1032GISEL-NEXT: v_mov_b32_e32 v4, s4
@@ -1474,36 +1386,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164DAGISEL-LABEL: divergent_value_i64:
; GFX1164DAGISEL: ; %bb.0: ; %entry
; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
; GFX1164DAGISEL-NEXT: s_mov_b64 s[0:1], -1
; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
-=======
-; GFX1164DAGISEL-NEXT: s_mov_b32 s0, -1
-=======
-; GFX1164DAGISEL-NEXT: s_mov_b64 s[0:1], -1
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
-; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v4, s0
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v5, s1
; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s8
; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v3, s8
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164DAGISEL-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[4:5]
; GFX1164DAGISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s8
@@ -1519,36 +1410,15 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1164GISEL-LABEL: divergent_value_i64:
; GFX1164GISEL: ; %bb.0: ; %entry
; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1164GISEL-NEXT: s_mov_b64 s[0:1], -1
-; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
-=======
-; GFX1164GISEL-NEXT: s_mov_b32 s0, -1
-=======
; GFX1164GISEL-NEXT: s_mov_b64 s[0:1], -1
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164GISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164GISEL-NEXT: v_mov_b32_e32 v4, s0
; GFX1164GISEL-NEXT: v_mov_b32_e32 v5, s1
; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s8
; GFX1164GISEL-NEXT: v_readlane_b32 s5, v3, s8
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1164GISEL-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[4:5]
; GFX1164GISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s8
@@ -1564,35 +1434,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132DAGISEL-LABEL: divergent_value_i64:
; GFX1132DAGISEL: ; %bb.0: ; %entry
; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
-; GFX1132DAGISEL-NEXT: s_mov_b64 s[0:1], -1
-; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-=======
-; GFX1132DAGISEL-NEXT: s_mov_b32 s0, -1
-=======
; GFX1132DAGISEL-NEXT: s_mov_b64 s[0:1], -1
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132DAGISEL-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[4:5], v[4:5]
; GFX1132DAGISEL-NEXT: s_and_b32 s6, vcc_lo, s2
; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
@@ -1607,35 +1456,14 @@ define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
; GFX1132GISEL-LABEL: divergent_value_i64:
; GFX1132GISEL: ; %bb.0: ; %entry
; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-<<<<<<< HEAD
-<<<<<<< HEAD
; GFX1132GISEL-NEXT: s_mov_b64 s[0:1], -1
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-=======
-; GFX1132GISEL-NEXT: s_mov_b32 s0, -1
-=======
-; GFX1132GISEL-NEXT: s_mov_b64 s[0:1], -1
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
-; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-<<<<<<< HEAD
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
-; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
; GFX1132GISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
->>>>>>> 381cb9fada25 ([AMDGPU] Extending wave reduction intrinsics for `i64` types - 1)
-=======
->>>>>>> 4d2b4133488e (Using `S_MOV_B64_IMM_PSEUDO` instead of dealing with legality concerns.)
; GFX1132GISEL-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[4:5], v[4:5]
; GFX1132GISEL-NEXT: s_and_b32 s6, vcc_lo, s2
; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
More information about the llvm-commits
mailing list