[llvm-branch-commits] [llvm] [AMDGPU] Extending wave reduction intrinsics for `i64` types - 3 (PR #151310)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Wed Aug 6 23:27:29 PDT 2025
https://github.com/easyonaadit updated https://github.com/llvm/llvm-project/pull/151310
From fa8407c09f344d8c4ab71d175501a0297a34c8a1 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Sat, 19 Jul 2025 12:48:18 +0530
Subject: [PATCH 1/4] [AMDGPU] Extending wave reduction intrinsics for `i64`
types - 3
Supporting Bitwise Operations: `and`, `or`, `xor`
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 104 +-
llvm/lib/Target/AMDGPU/SIInstructions.td | 3 +
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.and.ll | 854 ++++++++++
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.or.ll | 855 ++++++++++
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll | 1413 +++++++++++++++++
5 files changed, 3210 insertions(+), 19 deletions(-)
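
Note (editorial context, not part of the patch): the new 64-bit bitwise cases reuse the uniform-value shortcut already present for the 32-bit forms. `and` and `or` are idempotent, so reducing a wave-uniform value yields the value itself; `xor` depends only on the parity of the active-lane count, and for i64 the multiply by the 0/1 parity bit is assembled from 32-bit scalar multiplies. A minimal standalone sketch of that math follows; the enum and function names are illustrative, not the in-tree helpers, and the comments map each step to the scalar instruction the lowering emits.

// Standalone model of the uniform-input fast path added in this patch.
// Assumes a wave-uniform 64-bit value `Val` and `NumActiveLanes` set bits
// in the exec mask; opcode names in comments refer to the emitted MIR.
#include <cstdint>
#include <cstdio>

enum class WaveOp { And64, Or64, Xor64 };

static uint64_t reduceUniform(uint64_t Val, unsigned NumActiveLanes, WaveOp Op) {
  switch (Op) {
  case WaveOp::And64:
  case WaveOp::Or64:
    // Idempotent: combining N copies of Val yields Val (S_MOV_B64).
    return Val;
  case WaveOp::Xor64: {
    // XOR of N copies of Val is Val when N is odd, 0 when N is even,
    // i.e. Val * (N & 1). The 64-bit product is formed from 32-bit pieces:
    // lo = lo(Val)*p, hi = hi(Val)*p + mulhi(lo(Val), p).
    uint32_t Parity = NumActiveLanes & 1;                    // S_AND_B32 with 1
    uint32_t Lo = uint32_t(Val) * Parity;                    // S_MUL_I32 (sub0)
    uint32_t HiLo = uint32_t(Val >> 32) * Parity;            // S_MUL_I32 (high half)
    uint32_t Carry =
        uint32_t((uint64_t(uint32_t(Val)) * Parity) >> 32);  // S_MUL_HI_U32
    uint32_t Hi = Carry + HiLo;                              // S_ADD_U32 (sub1)
    return (uint64_t(Hi) << 32) | Lo;                        // REG_SEQUENCE sub0/sub1
  }
  }
  return 0;
}

int main() {
  // 33 active lanes (odd): xor keeps the value; 32 (even): xor gives 0.
  printf("%llx\n", (unsigned long long)reduceUniform(0x123456789abcdef0ULL, 33, WaveOp::Xor64));
  printf("%llx\n", (unsigned long long)reduceUniform(0x123456789abcdef0ULL, 32, WaveOp::Xor64));
  printf("%llx\n", (unsigned long long)reduceUniform(0x123456789abcdef0ULL, 32, WaveOp::And64));
}

Divergent inputs do not take this shortcut; as in the existing 32-bit lowering, they go through the per-lane readlane/accumulate loop visible in the test output below (s_ff1/s_ctz, v_readlane_b32, s_and_b64/s_or_b64/s_xor_b64).
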
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index f6338ffac59ca..d8b155d05d8bf 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5111,9 +5111,12 @@ static uint32_t getIdentityValueForWaveReduction(unsigned Opc) {
case AMDGPU::S_SUB_I32:
case AMDGPU::S_SUB_U64_PSEUDO:
case AMDGPU::S_OR_B32:
+ case AMDGPU::S_OR_B64:
case AMDGPU::S_XOR_B32:
+ case AMDGPU::S_XOR_B64:
return std::numeric_limits<uint32_t>::min();
case AMDGPU::S_AND_B32:
+ case AMDGPU::S_AND_B64:
return std::numeric_limits<uint32_t>::max();
default:
llvm_unreachable("Unexpected opcode in getIdentityValueForWaveReduction");
@@ -5146,7 +5149,9 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
case AMDGPU::S_MAX_I32:
case AMDGPU::V_CMP_GT_I64_e64: /*max*/
case AMDGPU::S_AND_B32:
- case AMDGPU::S_OR_B32: {
+ case AMDGPU::S_AND_B64:
+ case AMDGPU::S_OR_B32:
+ case AMDGPU::S_OR_B64: {
// Idempotent operations.
unsigned movOpc = is32BitOpc ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
BuildMI(BB, MI, DL, TII->get(movOpc), DstReg).addReg(SrcReg);
@@ -5154,6 +5159,7 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
break;
}
case AMDGPU::S_XOR_B32:
+ case AMDGPU::S_XOR_B64:
case AMDGPU::S_ADD_I32:
case AMDGPU::S_ADD_U64_PSEUDO:
case AMDGPU::S_SUB_I32:
@@ -5176,24 +5182,69 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
BuildMI(BB, MI, DL, TII->get(BitCountOpc), NumActiveLanes)
.addReg(ExecMask);
- switch (Opc) {
- case AMDGPU::S_XOR_B32: {
- // Performing an XOR operation on a uniform value
- // depends on the parity of the number of active lanes.
- // For even parity, the result will be 0, for odd
- // parity the result will be the same as the input value.
- Register ParityRegister =
- MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
-
- BuildMI(BB, MI, DL, TII->get(AMDGPU::S_AND_B32), ParityRegister)
- .addReg(NewAccumulator->getOperand(0).getReg())
- .addImm(1)
- .setOperandDead(3); // Dead scc
- BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DstReg)
- .addReg(SrcReg)
- .addReg(ParityRegister);
- break;
- }
+ switch (Opc) {
+ case AMDGPU::S_XOR_B32:
+ case AMDGPU::S_XOR_B64: {
+ // Performing an XOR operation on a uniform value
+ // depends on the parity of the number of active lanes.
+ // For even parity, the result will be 0, for odd
+ // parity the result will be the same as the input value.
+ Register ParityRegister =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_AND_B32), ParityRegister)
+ .addReg(NewAccumulator->getOperand(0).getReg())
+ .addImm(1)
+ .setOperandDead(3); // Dead scc
+ if (is32BitOpc) {
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DstReg)
+ .addReg(SrcReg)
+ .addReg(ParityRegister);
+ break;
+ } else {
+ Register DestSub0 =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ Register DestSub1 =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ Register Op1H_Op0L_Reg =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ Register CarryReg =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+
+ const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
+ const TargetRegisterClass *SrcSubRC =
+ TRI->getSubRegisterClass(SrcRC, AMDGPU::sub0);
+
+ MachineOperand Op1L = TII->buildExtractSubRegOrImm(
+ MI, MRI, MI.getOperand(1), SrcRC, AMDGPU::sub0, SrcSubRC);
+ MachineOperand Op1H = TII->buildExtractSubRegOrImm(
+ MI, MRI, MI.getOperand(1), SrcRC, AMDGPU::sub1, SrcSubRC);
+
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DestSub0)
+ .add(Op1L)
+ .addReg(ParityRegister);
+
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), Op1H_Op0L_Reg)
+ .add(Op1H)
+ .addReg(ParityRegister);
+
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_HI_U32), CarryReg)
+ .add(Op1L)
+ .addReg(ParityRegister);
+
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_ADD_U32), DestSub1)
+ .addReg(CarryReg)
+ .addReg(Op1H_Op0L_Reg)
+ .setOperandDead(3); // Dead scc
+
+ BuildMI(BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), DstReg)
+ .addReg(DestSub0)
+ .addImm(AMDGPU::sub0)
+ .addReg(DestSub1)
+ .addImm(AMDGPU::sub1);
+ break;
+ }
+ }
case AMDGPU::S_SUB_I32: {
Register NegatedVal = MRI.createVirtualRegister(DstRegClass);
@@ -5407,6 +5458,15 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
.addReg(LaneValueHiReg)
.addImm(AMDGPU::sub1);
switch (Opc) {
+ case ::AMDGPU::S_OR_B64:
+ case ::AMDGPU::S_AND_B64:
+ case ::AMDGPU::S_XOR_B64: {
+ NewAccumulator = BuildMI(*ComputeLoop, I, DL, TII->get(Opc), DstReg)
+ .addReg(Accumulator->getOperand(0).getReg())
+ .addReg(LaneValue->getOperand(0).getReg())
+ .setOperandDead(3); // Dead scc
+ break;
+ }
case AMDGPU::V_CMP_GT_I64_e64:
case AMDGPU::V_CMP_GT_U64_e64:
case AMDGPU::V_CMP_LT_I64_e64:
@@ -5538,10 +5598,16 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_SUB_U64_PSEUDO);
case AMDGPU::WAVE_REDUCE_AND_PSEUDO_B32:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_AND_B32);
+ case AMDGPU::WAVE_REDUCE_AND_PSEUDO_B64:
+ return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_AND_B64);
case AMDGPU::WAVE_REDUCE_OR_PSEUDO_B32:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_OR_B32);
+ case AMDGPU::WAVE_REDUCE_OR_PSEUDO_B64:
+ return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_OR_B64);
case AMDGPU::WAVE_REDUCE_XOR_PSEUDO_B32:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_XOR_B32);
+ case AMDGPU::WAVE_REDUCE_XOR_PSEUDO_B64:
+ return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_XOR_B64);
case AMDGPU::S_UADDO_PSEUDO:
case AMDGPU::S_USUBO_PSEUDO: {
const DebugLoc &DL = MI.getDebugLoc();
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 64697673fa1b1..9403ec33003f9 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -347,6 +347,9 @@ defvar Operations = [
WaveReduceOp<"max", "I64", i64, SGPR_64, VSrc_b64>,
WaveReduceOp<"add", "U64", i64, SGPR_64, VSrc_b64>,
WaveReduceOp<"sub", "U64", i64, SGPR_64, VSrc_b64>,
+ WaveReduceOp<"and", "B64", i64, SGPR_64, VSrc_b64>,
+ WaveReduceOp<"or", "B64", i64, SGPR_64, VSrc_b64>,
+ WaveReduceOp<"xor", "B64", i64, SGPR_64, VSrc_b64>,
];
foreach Op = Operations in {
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.and.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.and.ll
index 356b0e73b39e7..55e6189f65675 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.and.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.and.ll
@@ -980,3 +980,857 @@ endif:
store i32 %combine, ptr addrspace(1) %out
ret void
}
+
+define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: uniform_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: uniform_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: uniform_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: uniform_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: uniform_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX10DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: uniform_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX10GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX10GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: uniform_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: uniform_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: uniform_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: uniform_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.and.i64(i64 %in, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
+; GFX8DAGISEL-LABEL: const_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: const_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: const_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: const_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: const_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: const_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX10GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX10GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: const_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: const_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: const_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: const_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.and.i64(i64 123, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: poison_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: poison_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: poison_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: poison_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: poison_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT: global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: poison_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT: global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX11DAGISEL-LABEL: poison_value_i64:
+; GFX11DAGISEL: ; %bb.0: ; %entry
+; GFX11DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX11DAGISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX11DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11DAGISEL-NEXT: global_store_b64 v0, v[0:1], s[0:1]
+; GFX11DAGISEL-NEXT: s_endpgm
+;
+; GFX11GISEL-LABEL: poison_value_i64:
+; GFX11GISEL: ; %bb.0: ; %entry
+; GFX11GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX11GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX11GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11GISEL-NEXT: global_store_b64 v0, v[0:1], s[0:1]
+; GFX11GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.and.i64(i64 poison, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
+; GFX8DAGISEL-LABEL: divergent_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s10, s[6:7]
+; GFX8DAGISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX8DAGISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX8DAGISEL-NEXT: s_bitset0_b64 s[6:7], s10
+; GFX8DAGISEL-NEXT: s_and_b64 s[4:5], s[4:5], s[8:9]
+; GFX8DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX8DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX8DAGISEL-NEXT: ; %bb.2:
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8GISEL-LABEL: divergent_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b32 s4, -1
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: s_mov_b32 s5, s4
+; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8GISEL-NEXT: s_ff1_i32_b64 s10, s[6:7]
+; GFX8GISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX8GISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX8GISEL-NEXT: s_bitset0_b64 s[6:7], s10
+; GFX8GISEL-NEXT: s_and_b64 s[4:5], s[4:5], s[8:9]
+; GFX8GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX8GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX8GISEL-NEXT: ; %bb.2:
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9DAGISEL-LABEL: divergent_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s10, s[6:7]
+; GFX9DAGISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX9DAGISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX9DAGISEL-NEXT: s_bitset0_b64 s[6:7], s10
+; GFX9DAGISEL-NEXT: s_and_b64 s[4:5], s[4:5], s[8:9]
+; GFX9DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX9DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX9DAGISEL-NEXT: ; %bb.2:
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9GISEL-LABEL: divergent_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b32 s4, -1
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT: s_mov_b32 s5, s4
+; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9GISEL-NEXT: s_ff1_i32_b64 s10, s[6:7]
+; GFX9GISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX9GISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX9GISEL-NEXT: s_bitset0_b64 s[6:7], s10
+; GFX9GISEL-NEXT: s_and_b64 s[4:5], s[4:5], s[8:9]
+; GFX9GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX9GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX9GISEL-NEXT: ; %bb.2:
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX9GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1064DAGISEL-LABEL: divergent_value_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s10, s[6:7]
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX1064DAGISEL-NEXT: s_bitset0_b64 s[6:7], s10
+; GFX1064DAGISEL-NEXT: s_and_b64 s[4:5], s[4:5], s[8:9]
+; GFX1064DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX1064DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1064DAGISEL-NEXT: ; %bb.2:
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1064GISEL-LABEL: divergent_value_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b32 s4, -1
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT: s_mov_b32 s5, s4
+; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064GISEL-NEXT: s_ff1_i32_b64 s10, s[6:7]
+; GFX1064GISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX1064GISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX1064GISEL-NEXT: s_bitset0_b64 s[6:7], s10
+; GFX1064GISEL-NEXT: s_and_b64 s[4:5], s[4:5], s[8:9]
+; GFX1064GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX1064GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1064GISEL-NEXT: ; %bb.2:
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1064GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032DAGISEL-LABEL: divergent_value_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s8, v2, s7
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s9, v3, s7
+; GFX1032DAGISEL-NEXT: s_bitset0_b32 s6, s7
+; GFX1032DAGISEL-NEXT: s_and_b64 s[4:5], s[4:5], s[8:9]
+; GFX1032DAGISEL-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1032DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1032DAGISEL-NEXT: ; %bb.2:
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032GISEL-LABEL: divergent_value_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b32 s4, -1
+; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT: s_mov_b32 s5, s4
+; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
+; GFX1032GISEL-NEXT: v_readlane_b32 s8, v2, s7
+; GFX1032GISEL-NEXT: v_readlane_b32 s9, v3, s7
+; GFX1032GISEL-NEXT: s_bitset0_b32 s6, s7
+; GFX1032GISEL-NEXT: s_and_b64 s[4:5], s[4:5], s[8:9]
+; GFX1032GISEL-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1032GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1032GISEL-NEXT: ; %bb.2:
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1164DAGISEL-LABEL: divergent_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_mov_b32 s0, -1
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT: s_mov_b32 s1, s0
+; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s6, s[2:3]
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s6
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v3, s6
+; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s6
+; GFX1164DAGISEL-NEXT: s_and_b64 s[0:1], s[0:1], s[4:5]
+; GFX1164DAGISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1164DAGISEL-NEXT: ; %bb.2:
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX1164DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1164GISEL-LABEL: divergent_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b32 s0, -1
+; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT: s_mov_b32 s1, s0
+; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT: s_ctz_i32_b64 s6, s[2:3]
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s6
+; GFX1164GISEL-NEXT: v_readlane_b32 s5, v3, s6
+; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s6
+; GFX1164GISEL-NEXT: s_and_b64 s[0:1], s[0:1], s[4:5]
+; GFX1164GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1164GISEL-NEXT: ; %bb.2:
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX1164GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1132DAGISEL-LABEL: divergent_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_mov_b32 s0, -1
+; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT: s_mov_b32 s1, s0
+; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX1132DAGISEL-NEXT: s_and_b64 s[0:1], s[0:1], s[4:5]
+; GFX1132DAGISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1132DAGISEL-NEXT: ; %bb.2:
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1132GISEL-LABEL: divergent_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b32 s0, -1
+; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT: s_mov_b32 s1, s0
+; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX1132GISEL-NEXT: s_and_b64 s[0:1], s[0:1], s[4:5]
+; GFX1132GISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1132GISEL-NEXT: ; %bb.2:
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132GISEL-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.and.i64(i64 %id.x, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64 %in2) {
+; GFX8DAGISEL-LABEL: divergent_cfg_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GFX8DAGISEL-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
+; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[6:7], s[6:7]
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX8DAGISEL-NEXT: s_xor_b64 exec, exec, s[6:7]
+; GFX8DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX8DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX8DAGISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: divergent_cfg_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX8GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX8GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX8GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX8GISEL-NEXT: ; %bb.1: ; %else
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX8GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX8GISEL-NEXT: ; %bb.3: ; %if
+; GFX8GISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX8GISEL-NEXT: .LBB9_4: ; %endif
+; GFX8GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: divergent_cfg_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX9DAGISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9DAGISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[4:5], s[4:5]
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT: s_xor_b64 exec, exec, s[4:5]
+; GFX9DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX9DAGISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: divergent_cfg_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX9GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX9GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX9GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX9GISEL-NEXT: ; %bb.1: ; %else
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX9GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX9GISEL-NEXT: ; %bb.3: ; %if
+; GFX9GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], s[6:7]
+; GFX9GISEL-NEXT: .LBB9_4: ; %endif
+; GFX9GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX1064DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_clause 0x1
+; GFX1064DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064DAGISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[4:5], s[4:5]
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT: s_xor_b64 exec, exec, s[4:5]
+; GFX1064DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1064DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1064DAGISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT: s_endpgm
+;
+; GFX1064GISEL-LABEL: divergent_cfg_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX1064GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1064GISEL-NEXT: ; %bb.1: ; %else
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1064GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1064GISEL-NEXT: ; %bb.3: ; %if
+; GFX1064GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], s[6:7]
+; GFX1064GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1064GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT: s_endpgm
+;
+; GFX1032DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_clause 0x1
+; GFX1032DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc_lo, 15, v0
+; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032DAGISEL-NEXT: s_xor_b32 s4, exec_lo, s4
+; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s4, s4
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s4
+; GFX1032DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1032DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1032DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT: s_endpgm
+;
+; GFX1032GISEL-LABEL: divergent_cfg_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 16, v0
+; GFX1032GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1032GISEL-NEXT: s_and_saveexec_b32 s8, vcc_lo
+; GFX1032GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1032GISEL-NEXT: ; %bb.1: ; %else
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1032GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s2, s8
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1032GISEL-NEXT: ; %bb.3: ; %if
+; GFX1032GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b64 s[6:7], s[6:7]
+; GFX1032GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1032GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_clause 0x1
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX1164DAGISEL-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
+; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[6:7], s[6:7]
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: s_xor_b64 exec, exec, s[6:7]
+; GFX1164DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX1164DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1164DAGISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: divergent_cfg_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164GISEL-NEXT: s_mov_b64 s[8:9], exec
+; GFX1164GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
+; GFX1164GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1164GISEL-NEXT: ; %bb.1: ; %else
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1164GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[2:3], s[8:9]
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1164GISEL-NEXT: ; %bb.3: ; %if
+; GFX1164GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX1164GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1164GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_clause 0x1
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX1132DAGISEL-NEXT: s_xor_b32 s6, exec_lo, s6
+; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s6, s6
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s6
+; GFX1132DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX1132DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1132DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s6
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: divergent_cfg_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132GISEL-NEXT: s_mov_b32 s8, exec_lo
+; GFX1132GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
+; GFX1132GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1132GISEL-NEXT: ; %bb.1: ; %else
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1132GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s2, s8
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1132GISEL-NEXT: ; %bb.3: ; %if
+; GFX1132GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX1132GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1132GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %d_cmp = icmp ult i32 %tid, 16
+ br i1 %d_cmp, label %if, label %else
+
+if:
+ %reducedValTid = call i64 @llvm.amdgcn.wave.reduce.and.i64(i64 %in2, i32 1)
+ br label %endif
+
+else:
+ %reducedValIn = call i64 @llvm.amdgcn.wave.reduce.and.i64(i64 %in, i32 1)
+ br label %endif
+
+endif:
+ %combine = phi i64 [%reducedValTid, %if], [%reducedValIn, %else]
+ store i64 %combine, ptr addrspace(1) %out
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.or.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.or.ll
index e08787e6ba70a..1849eaecbe143 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.or.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.or.ll
@@ -980,3 +980,858 @@ endif:
store i32 %combine, ptr addrspace(1) %out
ret void
}
+
+define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: uniform_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: uniform_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: uniform_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: uniform_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: uniform_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX10DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: uniform_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX10GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX10GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: uniform_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: uniform_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: uniform_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: uniform_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.or.i64(i64 %in, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
+; GFX8DAGISEL-LABEL: const_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: const_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: const_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: const_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: const_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: const_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX10GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX10GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: const_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: const_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: const_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: const_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.or.i64(i64 123, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: poison_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: poison_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: poison_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: poison_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: poison_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT: global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: poison_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT: global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX11DAGISEL-LABEL: poison_value_i64:
+; GFX11DAGISEL: ; %bb.0: ; %entry
+; GFX11DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX11DAGISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX11DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11DAGISEL-NEXT: global_store_b64 v0, v[0:1], s[0:1]
+; GFX11DAGISEL-NEXT: s_endpgm
+;
+; GFX11GISEL-LABEL: poison_value_i64:
+; GFX11GISEL: ; %bb.0: ; %entry
+; GFX11GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX11GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX11GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11GISEL-NEXT: global_store_b64 v0, v[0:1], s[0:1]
+; GFX11GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.or.i64(i64 poison, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
+; GFX8DAGISEL-LABEL: divergent_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s10, s[6:7]
+; GFX8DAGISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX8DAGISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX8DAGISEL-NEXT: s_bitset0_b64 s[6:7], s10
+; GFX8DAGISEL-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX8DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX8DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX8DAGISEL-NEXT: ; %bb.2:
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8GISEL-LABEL: divergent_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b32 s4, 0
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: s_mov_b32 s5, s4
+; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8GISEL-NEXT: s_ff1_i32_b64 s10, s[6:7]
+; GFX8GISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX8GISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX8GISEL-NEXT: s_bitset0_b64 s[6:7], s10
+; GFX8GISEL-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX8GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX8GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX8GISEL-NEXT: ; %bb.2:
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9DAGISEL-LABEL: divergent_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s10, s[6:7]
+; GFX9DAGISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX9DAGISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX9DAGISEL-NEXT: s_bitset0_b64 s[6:7], s10
+; GFX9DAGISEL-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX9DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX9DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX9DAGISEL-NEXT: ; %bb.2:
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9GISEL-LABEL: divergent_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b32 s4, 0
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT: s_mov_b32 s5, s4
+; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9GISEL-NEXT: s_ff1_i32_b64 s10, s[6:7]
+; GFX9GISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX9GISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX9GISEL-NEXT: s_bitset0_b64 s[6:7], s10
+; GFX9GISEL-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX9GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX9GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX9GISEL-NEXT: ; %bb.2:
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX9GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1064DAGISEL-LABEL: divergent_value_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s10, s[6:7]
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX1064DAGISEL-NEXT: s_bitset0_b64 s[6:7], s10
+; GFX1064DAGISEL-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX1064DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX1064DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1064DAGISEL-NEXT: ; %bb.2:
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1064GISEL-LABEL: divergent_value_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b32 s4, 0
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT: s_mov_b32 s5, s4
+; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064GISEL-NEXT: s_ff1_i32_b64 s10, s[6:7]
+; GFX1064GISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX1064GISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX1064GISEL-NEXT: s_bitset0_b64 s[6:7], s10
+; GFX1064GISEL-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX1064GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX1064GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1064GISEL-NEXT: ; %bb.2:
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1064GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032DAGISEL-LABEL: divergent_value_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s8, v2, s7
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s9, v3, s7
+; GFX1032DAGISEL-NEXT: s_bitset0_b32 s6, s7
+; GFX1032DAGISEL-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX1032DAGISEL-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1032DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1032DAGISEL-NEXT: ; %bb.2:
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032GISEL-LABEL: divergent_value_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b32 s4, 0
+; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT: s_mov_b32 s5, s4
+; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
+; GFX1032GISEL-NEXT: v_readlane_b32 s8, v2, s7
+; GFX1032GISEL-NEXT: v_readlane_b32 s9, v3, s7
+; GFX1032GISEL-NEXT: s_bitset0_b32 s6, s7
+; GFX1032GISEL-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX1032GISEL-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1032GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1032GISEL-NEXT: ; %bb.2:
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1164DAGISEL-LABEL: divergent_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_mov_b32 s0, 0
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT: s_mov_b32 s1, s0
+; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s6, s[2:3]
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s6
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v3, s6
+; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s6
+; GFX1164DAGISEL-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX1164DAGISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1164DAGISEL-NEXT: ; %bb.2:
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX1164DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1164GISEL-LABEL: divergent_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b32 s0, 0
+; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT: s_mov_b32 s1, s0
+; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT: s_ctz_i32_b64 s6, s[2:3]
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s6
+; GFX1164GISEL-NEXT: v_readlane_b32 s5, v3, s6
+; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s6
+; GFX1164GISEL-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX1164GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1164GISEL-NEXT: ; %bb.2:
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX1164GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1132DAGISEL-LABEL: divergent_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_mov_b32 s0, 0
+; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT: s_mov_b32 s1, s0
+; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX1132DAGISEL-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX1132DAGISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1132DAGISEL-NEXT: ; %bb.2:
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1132GISEL-LABEL: divergent_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b32 s0, 0
+; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT: s_mov_b32 s1, s0
+; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX1132GISEL-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX1132GISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1132GISEL-NEXT: ; %bb.2:
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132GISEL-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.or.i64(i64 %id.x, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64 %in2) {
+; GFX8DAGISEL-LABEL: divergent_cfg_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GFX8DAGISEL-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
+; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[6:7], s[6:7]
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX8DAGISEL-NEXT: s_xor_b64 exec, exec, s[6:7]
+; GFX8DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX8DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX8DAGISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: divergent_cfg_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX8GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX8GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX8GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX8GISEL-NEXT: ; %bb.1: ; %else
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX8GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX8GISEL-NEXT: ; %bb.3: ; %if
+; GFX8GISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX8GISEL-NEXT: .LBB9_4: ; %endif
+; GFX8GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: divergent_cfg_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX9DAGISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9DAGISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[4:5], s[4:5]
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT: s_xor_b64 exec, exec, s[4:5]
+; GFX9DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX9DAGISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: divergent_cfg_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX9GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX9GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX9GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX9GISEL-NEXT: ; %bb.1: ; %else
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX9GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX9GISEL-NEXT: ; %bb.3: ; %if
+; GFX9GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], s[6:7]
+; GFX9GISEL-NEXT: .LBB9_4: ; %endif
+; GFX9GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX1064DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_clause 0x1
+; GFX1064DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064DAGISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[4:5], s[4:5]
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT: s_xor_b64 exec, exec, s[4:5]
+; GFX1064DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1064DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1064DAGISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT: s_endpgm
+;
+; GFX1064GISEL-LABEL: divergent_cfg_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX1064GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1064GISEL-NEXT: ; %bb.1: ; %else
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1064GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1064GISEL-NEXT: ; %bb.3: ; %if
+; GFX1064GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], s[6:7]
+; GFX1064GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1064GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT: s_endpgm
+;
+; GFX1032DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_clause 0x1
+; GFX1032DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc_lo, 15, v0
+; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032DAGISEL-NEXT: s_xor_b32 s4, exec_lo, s4
+; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s4, s4
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s4
+; GFX1032DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1032DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1032DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT: s_endpgm
+;
+; GFX1032GISEL-LABEL: divergent_cfg_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 16, v0
+; GFX1032GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1032GISEL-NEXT: s_and_saveexec_b32 s8, vcc_lo
+; GFX1032GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1032GISEL-NEXT: ; %bb.1: ; %else
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1032GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s2, s8
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1032GISEL-NEXT: ; %bb.3: ; %if
+; GFX1032GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b64 s[6:7], s[6:7]
+; GFX1032GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1032GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_clause 0x1
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX1164DAGISEL-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
+; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[6:7], s[6:7]
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: s_xor_b64 exec, exec, s[6:7]
+; GFX1164DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX1164DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1164DAGISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: divergent_cfg_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164GISEL-NEXT: s_mov_b64 s[8:9], exec
+; GFX1164GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
+; GFX1164GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1164GISEL-NEXT: ; %bb.1: ; %else
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1164GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[2:3], s[8:9]
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1164GISEL-NEXT: ; %bb.3: ; %if
+; GFX1164GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX1164GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1164GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_clause 0x1
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX1132DAGISEL-NEXT: s_xor_b32 s6, exec_lo, s6
+; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s6, s6
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s6
+; GFX1132DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX1132DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1132DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s6
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: divergent_cfg_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132GISEL-NEXT: s_mov_b32 s8, exec_lo
+; GFX1132GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
+; GFX1132GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1132GISEL-NEXT: ; %bb.1: ; %else
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1132GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s2, s8
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1132GISEL-NEXT: ; %bb.3: ; %if
+; GFX1132GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX1132GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1132GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %d_cmp = icmp ult i32 %tid, 16
+ br i1 %d_cmp, label %if, label %else
+
+if:
+ %reducedValTid = call i64 @llvm.amdgcn.wave.reduce.or.i64(i64 %in2, i32 1)
+ br label %endif
+
+else:
+ %reducedValIn = call i64 @llvm.amdgcn.wave.reduce.or.i64(i64 %in, i32 1)
+ br label %endif
+
+endif:
+ %combine = phi i64 [%reducedValTid, %if], [%reducedValIn, %else]
+ store i64 %combine, ptr addrspace(1) %out
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll
index 5b21d5c3aaeb6..b96954d030fef 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll
@@ -1279,6 +1279,1419 @@ endif:
store i32 %combine, ptr addrspace(1) %out
ret void
}
+
+define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: uniform_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX8DAGISEL-NEXT: s_and_b32 s4, s4, 1
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: s_mul_i32 s0, s2, s4
+; GFX8DAGISEL-NEXT: s_mul_i32 s1, s3, s4
+; GFX8DAGISEL-NEXT: s_mul_hi_u32 s2, s2, s4
+; GFX8DAGISEL-NEXT: s_add_u32 s1, s2, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: uniform_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX8GISEL-NEXT: s_and_b32 s5, s4, 1
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mul_i32 s4, s2, s5
+; GFX8GISEL-NEXT: s_mul_i32 s3, s3, s5
+; GFX8GISEL-NEXT: s_mul_hi_u32 s2, s2, s5
+; GFX8GISEL-NEXT: s_add_u32 s5, s2, s3
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: uniform_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9DAGISEL-NEXT: s_and_b32 s5, s4, 1
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_mul_i32 s4, s2, s5
+; GFX9DAGISEL-NEXT: s_mul_i32 s3, s3, s5
+; GFX9DAGISEL-NEXT: s_mul_hi_u32 s2, s2, s5
+; GFX9DAGISEL-NEXT: s_add_u32 s5, s2, s3
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: uniform_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9GISEL-NEXT: s_and_b32 s5, s4, 1
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mul_i32 s4, s2, s5
+; GFX9GISEL-NEXT: s_mul_i32 s3, s3, s5
+; GFX9GISEL-NEXT: s_mul_hi_u32 s2, s2, s5
+; GFX9GISEL-NEXT: s_add_u32 s5, s2, s3
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX1064DAGISEL-LABEL: uniform_value_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064DAGISEL-NEXT: s_and_b32 s4, s4, 1
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1064DAGISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX1064DAGISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1064DAGISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT: s_endpgm
+;
+; GFX1064GISEL-LABEL: uniform_value_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064GISEL-NEXT: s_and_b32 s4, s4, 1
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1064GISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX1064GISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1064GISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT: s_endpgm
+;
+; GFX1032DAGISEL-LABEL: uniform_value_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT: s_mov_b32 s4, exec_lo
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s4, s4
+; GFX1032DAGISEL-NEXT: s_and_b32 s4, s4, 1
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1032DAGISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX1032DAGISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1032DAGISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT: s_endpgm
+;
+; GFX1032GISEL-LABEL: uniform_value_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT: s_mov_b32 s4, exec_lo
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s4, s4
+; GFX1032GISEL-NEXT: s_and_b32 s4, s4, 1
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1032GISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX1032GISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1032GISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: uniform_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: s_and_b32 s4, s4, 1
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1164DAGISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX1164DAGISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1164DAGISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: uniform_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT: s_and_b32 s4, s4, 1
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1164GISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX1164GISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1164GISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: uniform_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: s_mov_b32 s4, exec_lo
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s4, s4
+; GFX1132DAGISEL-NEXT: s_and_b32 s4, s4, 1
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1132DAGISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX1132DAGISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1132DAGISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: uniform_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: s_mov_b32 s4, exec_lo
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s4, s4
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: s_and_b32 s4, s4, 1
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1132GISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX1132GISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX1132GISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.xor.i64(i64 %in, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
+; GFX8DAGISEL-LABEL: const_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX8DAGISEL-NEXT: s_and_b32 s3, s2, 1
+; GFX8DAGISEL-NEXT: s_mul_i32 s2, s3, 0x7b
+; GFX8DAGISEL-NEXT: s_mul_i32 s4, s3, 0
+; GFX8DAGISEL-NEXT: s_mul_hi_u32 s3, 0x7b, s3
+; GFX8DAGISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: const_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX8GISEL-NEXT: s_and_b32 s3, s2, 1
+; GFX8GISEL-NEXT: s_mul_i32 s2, s3, 0x7b
+; GFX8GISEL-NEXT: s_mul_i32 s4, s3, 0
+; GFX8GISEL-NEXT: s_mul_hi_u32 s3, 0x7b, s3
+; GFX8GISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: const_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9DAGISEL-NEXT: s_and_b32 s3, s2, 1
+; GFX9DAGISEL-NEXT: s_mul_i32 s2, s3, 0x7b
+; GFX9DAGISEL-NEXT: s_mul_i32 s4, s3, 0
+; GFX9DAGISEL-NEXT: s_mul_hi_u32 s3, 0x7b, s3
+; GFX9DAGISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: const_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9GISEL-NEXT: s_and_b32 s3, s2, 1
+; GFX9GISEL-NEXT: s_mul_i32 s2, s3, 0x7b
+; GFX9GISEL-NEXT: s_mul_i32 s4, s3, 0
+; GFX9GISEL-NEXT: s_mul_hi_u32 s3, 0x7b, s3
+; GFX9GISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX1064DAGISEL-LABEL: const_value_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064DAGISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s2, 0
+; GFX1064DAGISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
+; GFX1064DAGISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX1064DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT: s_endpgm
+;
+; GFX1064GISEL-LABEL: const_value_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1064GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064GISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1064GISEL-NEXT: s_mul_i32 s3, s2, 0
+; GFX1064GISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
+; GFX1064GISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX1064GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT: s_endpgm
+;
+; GFX1032DAGISEL-LABEL: const_value_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX1032DAGISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1032DAGISEL-NEXT: s_mul_i32 s3, s2, 0
+; GFX1032DAGISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
+; GFX1032DAGISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX1032DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT: s_endpgm
+;
+; GFX1032GISEL-LABEL: const_value_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1032GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX1032GISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1032GISEL-NEXT: s_mul_i32 s3, s2, 0
+; GFX1032GISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
+; GFX1032GISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX1032GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: const_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s2, 0
+; GFX1164DAGISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
+; GFX1164DAGISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX1164DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: const_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1164GISEL-NEXT: s_mul_i32 s3, s2, 0
+; GFX1164GISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
+; GFX1164GISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX1164GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: const_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX1132DAGISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_mul_i32 s3, s2, 0
+; GFX1132DAGISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
+; GFX1132DAGISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX1132DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: const_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1132GISEL-NEXT: s_mul_i32 s3, s2, 0
+; GFX1132GISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
+; GFX1132GISEL-NEXT: s_mulk_i32 s2, 0x7b
+; GFX1132GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.xor.i64(i64 123, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: poison_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX8DAGISEL-NEXT: s_and_b32 s3, s2, 1
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_mul_i32 s2, s0, s3
+; GFX8DAGISEL-NEXT: s_mul_i32 s4, s1, s3
+; GFX8DAGISEL-NEXT: s_mul_hi_u32 s3, s0, s3
+; GFX8DAGISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: poison_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX8GISEL-NEXT: s_and_b32 s3, s2, 1
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mul_i32 s2, s0, s3
+; GFX8GISEL-NEXT: s_mul_i32 s4, s1, s3
+; GFX8GISEL-NEXT: s_mul_hi_u32 s3, s0, s3
+; GFX8GISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: poison_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9DAGISEL-NEXT: s_and_b32 s3, s2, 1
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_mul_i32 s2, s0, s3
+; GFX9DAGISEL-NEXT: s_mul_i32 s4, s1, s3
+; GFX9DAGISEL-NEXT: s_mul_hi_u32 s3, s0, s3
+; GFX9DAGISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: poison_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9GISEL-NEXT: s_and_b32 s3, s2, 1
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mul_i32 s2, s0, s3
+; GFX9GISEL-NEXT: s_mul_i32 s4, s1, s3
+; GFX9GISEL-NEXT: s_mul_hi_u32 s3, s0, s3
+; GFX9GISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX1064DAGISEL-LABEL: poison_value_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064DAGISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s1, s2
+; GFX1064DAGISEL-NEXT: s_mul_hi_u32 s4, s0, s2
+; GFX1064DAGISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX1064DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT: s_endpgm
+;
+; GFX1064GISEL-LABEL: poison_value_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1064GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064GISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mul_i32 s3, s1, s2
+; GFX1064GISEL-NEXT: s_mul_hi_u32 s4, s0, s2
+; GFX1064GISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX1064GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT: s_endpgm
+;
+; GFX1032DAGISEL-LABEL: poison_value_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX1032DAGISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_mul_i32 s3, s1, s2
+; GFX1032DAGISEL-NEXT: s_mul_hi_u32 s4, s0, s2
+; GFX1032DAGISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX1032DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT: s_endpgm
+;
+; GFX1032GISEL-LABEL: poison_value_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1032GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX1032GISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mul_i32 s3, s1, s2
+; GFX1032GISEL-NEXT: s_mul_hi_u32 s4, s0, s2
+; GFX1032GISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX1032GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: poison_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s1, s2
+; GFX1164DAGISEL-NEXT: s_mul_hi_u32 s4, s0, s2
+; GFX1164DAGISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX1164DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: poison_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mul_i32 s3, s1, s2
+; GFX1164GISEL-NEXT: s_mul_hi_u32 s4, s0, s2
+; GFX1164GISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX1164GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: poison_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX1132DAGISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_mul_i32 s3, s1, s2
+; GFX1132DAGISEL-NEXT: s_mul_hi_u32 s4, s0, s2
+; GFX1132DAGISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX1132DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: poison_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mul_i32 s3, s1, s2
+; GFX1132GISEL-NEXT: s_mul_hi_u32 s4, s0, s2
+; GFX1132GISEL-NEXT: s_mul_i32 s2, s0, s2
+; GFX1132GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.xor.i64(i64 poison, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
+; GFX8DAGISEL-LABEL: divergent_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s10, s[6:7]
+; GFX8DAGISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX8DAGISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX8DAGISEL-NEXT: s_bitset0_b64 s[6:7], s10
+; GFX8DAGISEL-NEXT: s_xor_b64 s[4:5], s[4:5], s[8:9]
+; GFX8DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX8DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX8DAGISEL-NEXT: ; %bb.2:
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8GISEL-LABEL: divergent_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b32 s4, 0
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: s_mov_b32 s5, s4
+; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8GISEL-NEXT: s_ff1_i32_b64 s10, s[6:7]
+; GFX8GISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX8GISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX8GISEL-NEXT: s_bitset0_b64 s[6:7], s10
+; GFX8GISEL-NEXT: s_xor_b64 s[4:5], s[4:5], s[8:9]
+; GFX8GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX8GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX8GISEL-NEXT: ; %bb.2:
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9DAGISEL-LABEL: divergent_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s10, s[6:7]
+; GFX9DAGISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX9DAGISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX9DAGISEL-NEXT: s_bitset0_b64 s[6:7], s10
+; GFX9DAGISEL-NEXT: s_xor_b64 s[4:5], s[4:5], s[8:9]
+; GFX9DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX9DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX9DAGISEL-NEXT: ; %bb.2:
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9GISEL-LABEL: divergent_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b32 s4, 0
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT: s_mov_b32 s5, s4
+; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9GISEL-NEXT: s_ff1_i32_b64 s10, s[6:7]
+; GFX9GISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX9GISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX9GISEL-NEXT: s_bitset0_b64 s[6:7], s10
+; GFX9GISEL-NEXT: s_xor_b64 s[4:5], s[4:5], s[8:9]
+; GFX9GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX9GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX9GISEL-NEXT: ; %bb.2:
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX9GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1064DAGISEL-LABEL: divergent_value_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s10, s[6:7]
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX1064DAGISEL-NEXT: s_bitset0_b64 s[6:7], s10
+; GFX1064DAGISEL-NEXT: s_xor_b64 s[4:5], s[4:5], s[8:9]
+; GFX1064DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX1064DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1064DAGISEL-NEXT: ; %bb.2:
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1064GISEL-LABEL: divergent_value_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b32 s4, 0
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT: s_mov_b32 s5, s4
+; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064GISEL-NEXT: s_ff1_i32_b64 s10, s[6:7]
+; GFX1064GISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX1064GISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX1064GISEL-NEXT: s_bitset0_b64 s[6:7], s10
+; GFX1064GISEL-NEXT: s_xor_b64 s[4:5], s[4:5], s[8:9]
+; GFX1064GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX1064GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1064GISEL-NEXT: ; %bb.2:
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1064GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032DAGISEL-LABEL: divergent_value_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s8, v2, s7
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s9, v3, s7
+; GFX1032DAGISEL-NEXT: s_bitset0_b32 s6, s7
+; GFX1032DAGISEL-NEXT: s_xor_b64 s[4:5], s[4:5], s[8:9]
+; GFX1032DAGISEL-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1032DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1032DAGISEL-NEXT: ; %bb.2:
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032GISEL-LABEL: divergent_value_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b32 s4, 0
+; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT: s_mov_b32 s5, s4
+; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
+; GFX1032GISEL-NEXT: v_readlane_b32 s8, v2, s7
+; GFX1032GISEL-NEXT: v_readlane_b32 s9, v3, s7
+; GFX1032GISEL-NEXT: s_bitset0_b32 s6, s7
+; GFX1032GISEL-NEXT: s_xor_b64 s[4:5], s[4:5], s[8:9]
+; GFX1032GISEL-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1032GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1032GISEL-NEXT: ; %bb.2:
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1164DAGISEL-LABEL: divergent_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_mov_b32 s0, 0
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT: s_mov_b32 s1, s0
+; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s6, s[2:3]
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s6
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v3, s6
+; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s6
+; GFX1164DAGISEL-NEXT: s_xor_b64 s[0:1], s[0:1], s[4:5]
+; GFX1164DAGISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1164DAGISEL-NEXT: ; %bb.2:
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX1164DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1164GISEL-LABEL: divergent_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b32 s0, 0
+; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT: s_mov_b32 s1, s0
+; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT: s_ctz_i32_b64 s6, s[2:3]
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s6
+; GFX1164GISEL-NEXT: v_readlane_b32 s5, v3, s6
+; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s6
+; GFX1164GISEL-NEXT: s_xor_b64 s[0:1], s[0:1], s[4:5]
+; GFX1164GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1164GISEL-NEXT: ; %bb.2:
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX1164GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1132DAGISEL-LABEL: divergent_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_mov_b32 s0, 0
+; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT: s_mov_b32 s1, s0
+; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX1132DAGISEL-NEXT: s_xor_b64 s[0:1], s[0:1], s[4:5]
+; GFX1132DAGISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1132DAGISEL-NEXT: ; %bb.2:
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1132GISEL-LABEL: divergent_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b32 s0, 0
+; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT: s_mov_b32 s1, s0
+; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX1132GISEL-NEXT: s_xor_b64 s[0:1], s[0:1], s[4:5]
+; GFX1132GISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1132GISEL-NEXT: ; %bb.2:
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132GISEL-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.xor.i64(i64 %id.x, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64 %in2) {
+; GFX8DAGISEL-LABEL: divergent_cfg_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX8DAGISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX8DAGISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX8DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8DAGISEL-NEXT: s_and_b32 s7, s6, 1
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_mul_i32 s6, s2, s7
+; GFX8DAGISEL-NEXT: s_mul_i32 s3, s3, s7
+; GFX8DAGISEL-NEXT: s_mul_hi_u32 s2, s2, s7
+; GFX8DAGISEL-NEXT: s_add_u32 s7, s2, s3
+; GFX8DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[8:9]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX8DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
+; GFX8DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8DAGISEL-NEXT: s_and_b32 s7, s6, 1
+; GFX8DAGISEL-NEXT: s_mul_i32 s6, s4, s7
+; GFX8DAGISEL-NEXT: s_mul_i32 s5, s5, s7
+; GFX8DAGISEL-NEXT: s_mul_hi_u32 s4, s4, s7
+; GFX8DAGISEL-NEXT: s_add_u32 s7, s4, s5
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX8DAGISEL-NEXT: ; %bb.4: ; %endif
+; GFX8DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: divergent_cfg_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX8GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX8GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX8GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX8GISEL-NEXT: ; %bb.1: ; %else
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8GISEL-NEXT: s_and_b32 s7, s6, 1
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mul_i32 s6, s2, s7
+; GFX8GISEL-NEXT: s_mul_i32 s3, s3, s7
+; GFX8GISEL-NEXT: s_mul_hi_u32 s2, s2, s7
+; GFX8GISEL-NEXT: s_add_u32 s7, s2, s3
+; GFX8GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX8GISEL-NEXT: ; %bb.3: ; %if
+; GFX8GISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX8GISEL-NEXT: s_and_b32 s7, s6, 1
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mul_i32 s6, s4, s7
+; GFX8GISEL-NEXT: s_mul_i32 s5, s5, s7
+; GFX8GISEL-NEXT: s_mul_hi_u32 s4, s4, s7
+; GFX8GISEL-NEXT: s_add_u32 s7, s4, s5
+; GFX8GISEL-NEXT: .LBB9_4: ; %endif
+; GFX8GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: divergent_cfg_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX9DAGISEL-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9DAGISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX9DAGISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX9DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9DAGISEL-NEXT: s_and_b32 s5, s4, 1
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_mul_i32 s4, s2, s5
+; GFX9DAGISEL-NEXT: s_mul_i32 s3, s3, s5
+; GFX9DAGISEL-NEXT: s_mul_hi_u32 s2, s2, s5
+; GFX9DAGISEL-NEXT: s_add_u32 s5, s2, s3
+; GFX9DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[8:9]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
+; GFX9DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9DAGISEL-NEXT: s_and_b32 s5, s4, 1
+; GFX9DAGISEL-NEXT: s_mul_i32 s4, s6, s5
+; GFX9DAGISEL-NEXT: s_mul_i32 s7, s7, s5
+; GFX9DAGISEL-NEXT: s_mul_hi_u32 s5, s6, s5
+; GFX9DAGISEL-NEXT: s_add_u32 s5, s5, s7
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9DAGISEL-NEXT: ; %bb.4: ; %endif
+; GFX9DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: divergent_cfg_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX9GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX9GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX9GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX9GISEL-NEXT: ; %bb.1: ; %else
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX9GISEL-NEXT: s_and_b32 s7, s6, 1
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mul_i32 s6, s2, s7
+; GFX9GISEL-NEXT: s_mul_i32 s3, s3, s7
+; GFX9GISEL-NEXT: s_mul_hi_u32 s2, s2, s7
+; GFX9GISEL-NEXT: s_add_u32 s7, s2, s3
+; GFX9GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX9GISEL-NEXT: ; %bb.3: ; %if
+; GFX9GISEL-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
+; GFX9GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9GISEL-NEXT: s_and_b32 s4, s4, 1
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mul_i32 s6, s8, s4
+; GFX9GISEL-NEXT: s_mul_i32 s5, s9, s4
+; GFX9GISEL-NEXT: s_mul_hi_u32 s4, s8, s4
+; GFX9GISEL-NEXT: s_add_u32 s7, s4, s5
+; GFX9GISEL-NEXT: .LBB9_4: ; %endif
+; GFX9GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX1064DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_clause 0x1
+; GFX1064DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX1064DAGISEL-NEXT: ; implicit-def: $sgpr8_sgpr9
+; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064DAGISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1064DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[8:9], exec
+; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s8, s[8:9]
+; GFX1064DAGISEL-NEXT: s_and_b32 s8, s8, 1
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s3, s8
+; GFX1064DAGISEL-NEXT: s_mul_hi_u32 s9, s2, s8
+; GFX1064DAGISEL-NEXT: s_mul_i32 s8, s2, s8
+; GFX1064DAGISEL-NEXT: s_add_u32 s9, s9, s3
+; GFX1064DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[4:5]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s8
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s9
+; GFX1064DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
+; GFX1064DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064DAGISEL-NEXT: s_and_b32 s4, s4, 1
+; GFX1064DAGISEL-NEXT: s_mul_i32 s5, s7, s4
+; GFX1064DAGISEL-NEXT: s_mul_hi_u32 s7, s6, s4
+; GFX1064DAGISEL-NEXT: s_mul_i32 s4, s6, s4
+; GFX1064DAGISEL-NEXT: s_add_u32 s5, s7, s5
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX1064DAGISEL-NEXT: ; %bb.4: ; %endif
+; GFX1064DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT: s_endpgm
+;
+; GFX1064GISEL-LABEL: divergent_cfg_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX1064GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1064GISEL-NEXT: ; %bb.1: ; %else
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1064GISEL-NEXT: s_and_b32 s6, s6, 1
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mul_i32 s3, s3, s6
+; GFX1064GISEL-NEXT: s_mul_hi_u32 s7, s2, s6
+; GFX1064GISEL-NEXT: s_mul_i32 s6, s2, s6
+; GFX1064GISEL-NEXT: s_add_u32 s7, s7, s3
+; GFX1064GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1064GISEL-NEXT: ; %bb.3: ; %if
+; GFX1064GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064GISEL-NEXT: s_and_b32 s4, s4, 1
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mul_i32 s5, s7, s4
+; GFX1064GISEL-NEXT: s_mul_hi_u32 s7, s6, s4
+; GFX1064GISEL-NEXT: s_mul_i32 s6, s6, s4
+; GFX1064GISEL-NEXT: s_add_u32 s7, s7, s5
+; GFX1064GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1064GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT: s_endpgm
+;
+; GFX1032DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_clause 0x1
+; GFX1032DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc_lo, 15, v0
+; GFX1032DAGISEL-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s8, vcc_lo
+; GFX1032DAGISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1032DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX1032DAGISEL-NEXT: s_mov_b32 s4, exec_lo
+; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s4, s4
+; GFX1032DAGISEL-NEXT: s_and_b32 s4, s4, 1
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX1032DAGISEL-NEXT: s_mul_hi_u32 s5, s2, s4
+; GFX1032DAGISEL-NEXT: s_mul_i32 s4, s2, s4
+; GFX1032DAGISEL-NEXT: s_add_u32 s5, s5, s3
+; GFX1032DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s2, s8
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX1032DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s2
+; GFX1032DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX1032DAGISEL-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1032DAGISEL-NEXT: s_and_b32 s3, s3, 1
+; GFX1032DAGISEL-NEXT: s_mul_i32 s5, s7, s3
+; GFX1032DAGISEL-NEXT: s_mul_hi_u32 s7, s6, s3
+; GFX1032DAGISEL-NEXT: s_mul_i32 s4, s6, s3
+; GFX1032DAGISEL-NEXT: s_add_u32 s5, s7, s5
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX1032DAGISEL-NEXT: ; %bb.4: ; %endif
+; GFX1032DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT: s_endpgm
+;
+; GFX1032GISEL-LABEL: divergent_cfg_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 16, v0
+; GFX1032GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1032GISEL-NEXT: s_and_saveexec_b32 s8, vcc_lo
+; GFX1032GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1032GISEL-NEXT: ; %bb.1: ; %else
+; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s6, s6
+; GFX1032GISEL-NEXT: s_and_b32 s6, s6, 1
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mul_i32 s3, s3, s6
+; GFX1032GISEL-NEXT: s_mul_hi_u32 s7, s2, s6
+; GFX1032GISEL-NEXT: s_mul_i32 s6, s2, s6
+; GFX1032GISEL-NEXT: s_add_u32 s7, s7, s3
+; GFX1032GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s2, s8
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1032GISEL-NEXT: ; %bb.3: ; %if
+; GFX1032GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032GISEL-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1032GISEL-NEXT: s_and_b32 s3, s3, 1
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mul_i32 s4, s7, s3
+; GFX1032GISEL-NEXT: s_mul_hi_u32 s5, s6, s3
+; GFX1032GISEL-NEXT: s_mul_i32 s6, s6, s3
+; GFX1032GISEL-NEXT: s_add_u32 s7, s5, s4
+; GFX1032GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1032GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_clause 0x1
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT: ; implicit-def: $sgpr8_sgpr9
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX1164DAGISEL-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
+; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1164DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[8:9], exec
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s8, s[8:9]
+; GFX1164DAGISEL-NEXT: s_and_b32 s8, s8, 1
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s3, s8
+; GFX1164DAGISEL-NEXT: s_mul_hi_u32 s9, s2, s8
+; GFX1164DAGISEL-NEXT: s_mul_i32 s8, s2, s8
+; GFX1164DAGISEL-NEXT: s_add_u32 s9, s9, s3
+; GFX1164DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[6:7]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s8
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s9
+; GFX1164DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
+; GFX1164DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: s_and_b32 s6, s6, 1
+; GFX1164DAGISEL-NEXT: s_mul_i32 s5, s5, s6
+; GFX1164DAGISEL-NEXT: s_mul_hi_u32 s7, s4, s6
+; GFX1164DAGISEL-NEXT: s_mul_i32 s4, s4, s6
+; GFX1164DAGISEL-NEXT: s_add_u32 s5, s7, s5
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX1164DAGISEL-NEXT: ; %bb.4: ; %endif
+; GFX1164DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: divergent_cfg_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164GISEL-NEXT: s_mov_b64 s[8:9], exec
+; GFX1164GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
+; GFX1164GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1164GISEL-NEXT: ; %bb.1: ; %else
+; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164GISEL-NEXT: s_and_b32 s6, s6, 1
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mul_i32 s3, s3, s6
+; GFX1164GISEL-NEXT: s_mul_hi_u32 s7, s2, s6
+; GFX1164GISEL-NEXT: s_mul_i32 s6, s2, s6
+; GFX1164GISEL-NEXT: s_add_u32 s7, s7, s3
+; GFX1164GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[2:3], s[8:9]
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1164GISEL-NEXT: ; %bb.3: ; %if
+; GFX1164GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164GISEL-NEXT: s_and_b32 s6, s6, 1
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mul_i32 s5, s5, s6
+; GFX1164GISEL-NEXT: s_mul_hi_u32 s7, s4, s6
+; GFX1164GISEL-NEXT: s_mul_i32 s6, s4, s6
+; GFX1164GISEL-NEXT: s_add_u32 s7, s7, s5
+; GFX1164GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1164GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_clause 0x1
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132DAGISEL-NEXT: s_mov_b32 s8, exec_lo
+; GFX1132DAGISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX1132DAGISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1132DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX1132DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s6, s6
+; GFX1132DAGISEL-NEXT: s_and_b32 s6, s6, 1
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_mul_i32 s3, s3, s6
+; GFX1132DAGISEL-NEXT: s_mul_hi_u32 s7, s2, s6
+; GFX1132DAGISEL-NEXT: s_mul_i32 s6, s2, s6
+; GFX1132DAGISEL-NEXT: s_add_u32 s7, s7, s3
+; GFX1132DAGISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s2, s8
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s2
+; GFX1132DAGISEL-NEXT: ; %bb.3: ; %if
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: s_and_b32 s3, s3, 1
+; GFX1132DAGISEL-NEXT: s_mul_i32 s5, s5, s3
+; GFX1132DAGISEL-NEXT: s_mul_hi_u32 s6, s4, s3
+; GFX1132DAGISEL-NEXT: s_mul_i32 s4, s4, s3
+; GFX1132DAGISEL-NEXT: s_add_u32 s5, s6, s5
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX1132DAGISEL-NEXT: ; %bb.4: ; %endif
+; GFX1132DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: divergent_cfg_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132GISEL-NEXT: s_mov_b32 s8, exec_lo
+; GFX1132GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
+; GFX1132GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1132GISEL-NEXT: ; %bb.1: ; %else
+; GFX1132GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s6, s6
+; GFX1132GISEL-NEXT: s_and_b32 s6, s6, 1
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mul_i32 s3, s3, s6
+; GFX1132GISEL-NEXT: s_mul_hi_u32 s7, s2, s6
+; GFX1132GISEL-NEXT: s_mul_i32 s6, s2, s6
+; GFX1132GISEL-NEXT: s_add_u32 s7, s7, s3
+; GFX1132GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s2, s8
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1132GISEL-NEXT: ; %bb.3: ; %if
+; GFX1132GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132GISEL-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1132GISEL-NEXT: s_and_b32 s3, s3, 1
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mul_i32 s5, s5, s3
+; GFX1132GISEL-NEXT: s_mul_hi_u32 s7, s4, s3
+; GFX1132GISEL-NEXT: s_mul_i32 s6, s4, s3
+; GFX1132GISEL-NEXT: s_add_u32 s7, s7, s5
+; GFX1132GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1132GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %d_cmp = icmp ult i32 %tid, 16
+ br i1 %d_cmp, label %if, label %else
+
+if:
+ %reducedValTid = call i64 @llvm.amdgcn.wave.reduce.xor.i64(i64 %in2, i32 1)
+ br label %endif
+
+else:
+ %reducedValIn = call i64 @llvm.amdgcn.wave.reduce.xor.i64(i64 %in, i32 1)
+ br label %endif
+
+endif:
+ %combine = phi i64 [%reducedValTid, %if], [%reducedValIn, %else]
+ store i64 %combine, ptr addrspace(1) %out
+ ret void
+}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX10DAGISEL: {{.*}}
; GFX10GISEL: {{.*}}
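For reference, the divergent_value_i64 checks above all lower to the same lane loop; a minimal scalar model of that loop follows (illustrative only — the helper name is made up, and per-lane reads are stood in for by an array index rather than v_readlane):

    #include <cstdint>

    // Scalar model of the divergent-input i64 XOR reduction loop in the
    // checks above: find the lowest active lane, read its 64-bit value,
    // fold it into the accumulator, clear that lane, and repeat until the
    // exec mask is exhausted.
    uint64_t waveXorDivergent(const uint64_t LaneVals[], uint64_t ExecMask) {
      uint64_t Acc = 0;                            // s[4:5] = 0
      while (ExecMask != 0) {                      // s_cmp_lg_u64 + s_cbranch
        unsigned Lane = __builtin_ctzll(ExecMask); // s_ff1_i32_b64 / s_ctz_i32_b64
        Acc ^= LaneVals[Lane];                     // v_readlane x2 + s_xor_b64
        ExecMask &= ExecMask - 1;                  // s_bitset0_b64 on the lowest set bit
      }
      return Acc;
    }

(__builtin_ctzll is the GCC/Clang builtin; the loop condition guarantees its argument is nonzero.)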
>From 4a37b48ad185016692db93838379b94312a69a70 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Thu, 31 Jul 2025 14:18:31 +0530
Subject: [PATCH 2/4] [AMDGPU] Remove redundant carry instructions from the i64
 wave reduction lowering
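The high-half carry chain removed below is dead: the parity factor produced by the preceding S_AND_B32 is always 0 or 1, so a 32-bit multiply by it can never produce a non-zero high half. A minimal scalar sketch of that reasoning (illustrative only, not part of the change):

    #include <cassert>
    #include <cstdint>

    // Scalar model of the uniform-input i64 XOR reduction: the result is the
    // input when an odd number of lanes is active, and 0 otherwise.
    uint64_t xorReduceUniform(uint64_t Val, uint32_t Parity) {
      assert(Parity <= 1 && "parity is popcount(exec) & 1, i.e. a single bit");
      uint32_t Lo = static_cast<uint32_t>(Val);
      uint32_t Hi = static_cast<uint32_t>(Val >> 32);
      // mulhi_u32(Lo, Parity) is always 0 for a 0/1 multiplier, so the former
      // S_MUL_HI_U32 + S_ADD_U32 carry chain contributed nothing.
      uint32_t DestLo = Lo * Parity; // S_MUL_I32 into sub0
      uint32_t DestHi = Hi * Parity; // S_MUL_I32 into sub1
      return (static_cast<uint64_t>(DestHi) << 32) | DestLo;
    }

This is why the two S_MUL_I32 results can feed the REG_SEQUENCE directly.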
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 15 +-
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll | 420 +++++++-----------
2 files changed, 150 insertions(+), 285 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index d8b155d05d8bf..65606a8427b1d 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5206,10 +5206,6 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
Register DestSub1 =
MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
- Register Op1H_Op0L_Reg =
- MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
- Register CarryReg =
- MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
const TargetRegisterClass *SrcSubRC =
@@ -5224,19 +5220,10 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
.add(Op1L)
.addReg(ParityRegister);
- BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), Op1H_Op0L_Reg)
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DestSub1)
.add(Op1H)
.addReg(ParityRegister);
- BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_HI_U32), CarryReg)
- .add(Op1L)
- .addReg(ParityRegister);
-
- BuildMI(BB, MI, DL, TII->get(AMDGPU::S_ADD_U32), DestSub1)
- .addReg(CarryReg)
- .addReg(Op1H_Op0L_Reg)
- .setOperandDead(3); // Dead scc
-
BuildMI(BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), DstReg)
.addReg(DestSub0)
.addImm(AMDGPU::sub0)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll
index b96954d030fef..9c523b2404121 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.xor.ll
@@ -1288,12 +1288,10 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX8DAGISEL-NEXT: s_and_b32 s4, s4, 1
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
-; GFX8DAGISEL-NEXT: s_mul_i32 s0, s2, s4
; GFX8DAGISEL-NEXT: s_mul_i32 s1, s3, s4
-; GFX8DAGISEL-NEXT: s_mul_hi_u32 s2, s2, s4
-; GFX8DAGISEL-NEXT: s_add_u32 s1, s2, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: s_mul_i32 s0, s2, s4
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
@@ -1304,15 +1302,13 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8GISEL-NEXT: s_mov_b64 s[4:5], exec
; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
-; GFX8GISEL-NEXT: s_and_b32 s5, s4, 1
+; GFX8GISEL-NEXT: s_and_b32 s4, s4, 1
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8GISEL-NEXT: s_mul_i32 s4, s2, s5
-; GFX8GISEL-NEXT: s_mul_i32 s3, s3, s5
-; GFX8GISEL-NEXT: s_mul_hi_u32 s2, s2, s5
-; GFX8GISEL-NEXT: s_add_u32 s5, s2, s3
-; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX8GISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX8GISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
-; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX8GISEL-NEXT: s_endpgm
@@ -1322,15 +1318,13 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], exec
; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
-; GFX9DAGISEL-NEXT: s_and_b32 s5, s4, 1
+; GFX9DAGISEL-NEXT: s_and_b32 s4, s4, 1
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9DAGISEL-NEXT: s_mul_i32 s4, s2, s5
-; GFX9DAGISEL-NEXT: s_mul_i32 s3, s3, s5
-; GFX9DAGISEL-NEXT: s_mul_hi_u32 s2, s2, s5
-; GFX9DAGISEL-NEXT: s_add_u32 s5, s2, s3
-; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s4
-; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9DAGISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX9DAGISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9DAGISEL-NEXT: s_endpgm
;
@@ -1339,15 +1333,13 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9GISEL-NEXT: s_mov_b64 s[4:5], exec
; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
-; GFX9GISEL-NEXT: s_and_b32 s5, s4, 1
+; GFX9GISEL-NEXT: s_and_b32 s4, s4, 1
; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9GISEL-NEXT: s_mul_i32 s4, s2, s5
-; GFX9GISEL-NEXT: s_mul_i32 s3, s3, s5
-; GFX9GISEL-NEXT: s_mul_hi_u32 s2, s2, s5
-; GFX9GISEL-NEXT: s_add_u32 s5, s2, s3
-; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s4
-; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9GISEL-NEXT: s_mul_i32 s2, s2, s4
+; GFX9GISEL-NEXT: s_mul_i32 s3, s3, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9GISEL-NEXT: s_endpgm
;
@@ -1359,10 +1351,8 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX1064DAGISEL-NEXT: s_and_b32 s4, s4, 1
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s3, s4
-; GFX1064DAGISEL-NEXT: s_mul_hi_u32 s5, s2, s4
; GFX1064DAGISEL-NEXT: s_mul_i32 s2, s2, s4
-; GFX1064DAGISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s3, s4
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1376,10 +1366,8 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX1064GISEL-NEXT: s_and_b32 s4, s4, 1
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064GISEL-NEXT: s_mul_i32 s3, s3, s4
-; GFX1064GISEL-NEXT: s_mul_hi_u32 s5, s2, s4
; GFX1064GISEL-NEXT: s_mul_i32 s2, s2, s4
-; GFX1064GISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1064GISEL-NEXT: s_mul_i32 s3, s3, s4
; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX1064GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1393,10 +1381,8 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s4, s4
; GFX1032DAGISEL-NEXT: s_and_b32 s4, s4, 1
; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032DAGISEL-NEXT: s_mul_i32 s3, s3, s4
-; GFX1032DAGISEL-NEXT: s_mul_hi_u32 s5, s2, s4
; GFX1032DAGISEL-NEXT: s_mul_i32 s2, s2, s4
-; GFX1032DAGISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1032DAGISEL-NEXT: s_mul_i32 s3, s3, s4
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1410,10 +1396,8 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s4, s4
; GFX1032GISEL-NEXT: s_and_b32 s4, s4, 1
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032GISEL-NEXT: s_mul_i32 s3, s3, s4
-; GFX1032GISEL-NEXT: s_mul_hi_u32 s5, s2, s4
; GFX1032GISEL-NEXT: s_mul_i32 s2, s2, s4
-; GFX1032GISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1032GISEL-NEXT: s_mul_i32 s3, s3, s4
; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX1032GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1428,10 +1412,8 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1164DAGISEL-NEXT: s_and_b32 s4, s4, 1
; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s3, s4
-; GFX1164DAGISEL-NEXT: s_mul_hi_u32 s5, s2, s4
; GFX1164DAGISEL-NEXT: s_mul_i32 s2, s2, s4
-; GFX1164DAGISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s3, s4
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -1446,10 +1428,8 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1164GISEL-NEXT: s_and_b32 s4, s4, 1
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164GISEL-NEXT: s_mul_i32 s3, s3, s4
-; GFX1164GISEL-NEXT: s_mul_hi_u32 s5, s2, s4
; GFX1164GISEL-NEXT: s_mul_i32 s2, s2, s4
-; GFX1164GISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1164GISEL-NEXT: s_mul_i32 s3, s3, s4
; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -1463,10 +1443,8 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s4, s4
; GFX1132DAGISEL-NEXT: s_and_b32 s4, s4, 1
; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132DAGISEL-NEXT: s_mul_i32 s3, s3, s4
-; GFX1132DAGISEL-NEXT: s_mul_hi_u32 s5, s2, s4
; GFX1132DAGISEL-NEXT: s_mul_i32 s2, s2, s4
-; GFX1132DAGISEL-NEXT: s_add_u32 s3, s5, s3
+; GFX1132DAGISEL-NEXT: s_mul_i32 s3, s3, s4
; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -1478,14 +1456,11 @@ define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1132GISEL-NEXT: s_mov_b32 s4, exec_lo
; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s4, s4
-; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
; GFX1132GISEL-NEXT: s_and_b32 s4, s4, 1
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132GISEL-NEXT: s_mul_i32 s3, s3, s4
-; GFX1132GISEL-NEXT: s_mul_hi_u32 s5, s2, s4
; GFX1132GISEL-NEXT: s_mul_i32 s2, s2, s4
-; GFX1132GISEL-NEXT: s_add_u32 s3, s5, s3
-; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: s_mul_i32 s3, s3, s4
; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
@@ -1503,9 +1478,7 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX8DAGISEL-NEXT: s_and_b32 s3, s2, 1
; GFX8DAGISEL-NEXT: s_mul_i32 s2, s3, 0x7b
-; GFX8DAGISEL-NEXT: s_mul_i32 s4, s3, 0
-; GFX8DAGISEL-NEXT: s_mul_hi_u32 s3, 0x7b, s3
-; GFX8DAGISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX8DAGISEL-NEXT: s_mul_i32 s3, s3, 0
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s2
@@ -1521,9 +1494,7 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
; GFX8GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX8GISEL-NEXT: s_and_b32 s3, s2, 1
; GFX8GISEL-NEXT: s_mul_i32 s2, s3, 0x7b
-; GFX8GISEL-NEXT: s_mul_i32 s4, s3, 0
-; GFX8GISEL-NEXT: s_mul_hi_u32 s3, 0x7b, s3
-; GFX8GISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX8GISEL-NEXT: s_mul_i32 s3, s3, 0
; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
@@ -1539,9 +1510,7 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX9DAGISEL-NEXT: s_and_b32 s3, s2, 1
; GFX9DAGISEL-NEXT: s_mul_i32 s2, s3, 0x7b
-; GFX9DAGISEL-NEXT: s_mul_i32 s4, s3, 0
-; GFX9DAGISEL-NEXT: s_mul_hi_u32 s3, 0x7b, s3
-; GFX9DAGISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX9DAGISEL-NEXT: s_mul_i32 s3, s3, 0
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
@@ -1556,9 +1525,7 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX9GISEL-NEXT: s_and_b32 s3, s2, 1
; GFX9GISEL-NEXT: s_mul_i32 s2, s3, 0x7b
-; GFX9GISEL-NEXT: s_mul_i32 s4, s3, 0
-; GFX9GISEL-NEXT: s_mul_hi_u32 s3, 0x7b, s3
-; GFX9GISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX9GISEL-NEXT: s_mul_i32 s3, s3, 0
; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
@@ -1572,11 +1539,9 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
; GFX1064DAGISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
-; GFX1064DAGISEL-NEXT: s_and_b32 s2, s2, 1
-; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s2, 0
-; GFX1064DAGISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
-; GFX1064DAGISEL-NEXT: s_mulk_i32 s2, 0x7b
-; GFX1064DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1064DAGISEL-NEXT: s_and_b32 s3, s2, 1
+; GFX1064DAGISEL-NEXT: s_mul_i32 s2, s3, 0x7b
+; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s3, 0
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1589,11 +1554,9 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
; GFX1064GISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
-; GFX1064GISEL-NEXT: s_and_b32 s2, s2, 1
-; GFX1064GISEL-NEXT: s_mul_i32 s3, s2, 0
-; GFX1064GISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
-; GFX1064GISEL-NEXT: s_mulk_i32 s2, 0x7b
-; GFX1064GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1064GISEL-NEXT: s_and_b32 s3, s2, 1
+; GFX1064GISEL-NEXT: s_mul_i32 s2, s3, 0x7b
+; GFX1064GISEL-NEXT: s_mul_i32 s3, s3, 0
; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1606,11 +1569,9 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
; GFX1032DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
-; GFX1032DAGISEL-NEXT: s_and_b32 s2, s2, 1
-; GFX1032DAGISEL-NEXT: s_mul_i32 s3, s2, 0
-; GFX1032DAGISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
-; GFX1032DAGISEL-NEXT: s_mulk_i32 s2, 0x7b
-; GFX1032DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1032DAGISEL-NEXT: s_and_b32 s3, s2, 1
+; GFX1032DAGISEL-NEXT: s_mul_i32 s2, s3, 0x7b
+; GFX1032DAGISEL-NEXT: s_mul_i32 s3, s3, 0
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1623,11 +1584,9 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
; GFX1032GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
-; GFX1032GISEL-NEXT: s_and_b32 s2, s2, 1
-; GFX1032GISEL-NEXT: s_mul_i32 s3, s2, 0
-; GFX1032GISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
-; GFX1032GISEL-NEXT: s_mulk_i32 s2, 0x7b
-; GFX1032GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1032GISEL-NEXT: s_and_b32 s3, s2, 1
+; GFX1032GISEL-NEXT: s_mul_i32 s2, s3, 0x7b
+; GFX1032GISEL-NEXT: s_mul_i32 s3, s3, 0
; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1641,11 +1600,9 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1164DAGISEL-NEXT: s_and_b32 s2, s2, 1
-; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s2, 0
-; GFX1164DAGISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
-; GFX1164DAGISEL-NEXT: s_mulk_i32 s2, 0x7b
-; GFX1164DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1164DAGISEL-NEXT: s_and_b32 s3, s2, 1
+; GFX1164DAGISEL-NEXT: s_mul_i32 s2, s3, 0x7b
+; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s3, 0
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1659,11 +1616,9 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1164GISEL-NEXT: s_and_b32 s2, s2, 1
-; GFX1164GISEL-NEXT: s_mul_i32 s3, s2, 0
-; GFX1164GISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
-; GFX1164GISEL-NEXT: s_mulk_i32 s2, 0x7b
-; GFX1164GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1164GISEL-NEXT: s_and_b32 s3, s2, 1
+; GFX1164GISEL-NEXT: s_mul_i32 s2, s3, 0x7b
+; GFX1164GISEL-NEXT: s_mul_i32 s3, s3, 0
; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1676,12 +1631,10 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
-; GFX1132DAGISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1132DAGISEL-NEXT: s_and_b32 s3, s2, 1
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132DAGISEL-NEXT: s_mul_i32 s3, s2, 0
-; GFX1132DAGISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
-; GFX1132DAGISEL-NEXT: s_mulk_i32 s2, 0x7b
-; GFX1132DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1132DAGISEL-NEXT: s_mul_i32 s2, s3, 0x7b
+; GFX1132DAGISEL-NEXT: s_mul_i32 s3, s3, 0
; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1695,11 +1648,9 @@ define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1132GISEL-NEXT: s_and_b32 s2, s2, 1
-; GFX1132GISEL-NEXT: s_mul_i32 s3, s2, 0
-; GFX1132GISEL-NEXT: s_mul_hi_u32 s4, 0x7b, s2
-; GFX1132GISEL-NEXT: s_mulk_i32 s2, 0x7b
-; GFX1132GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1132GISEL-NEXT: s_and_b32 s3, s2, 1
+; GFX1132GISEL-NEXT: s_mul_i32 s2, s3, 0x7b
+; GFX1132GISEL-NEXT: s_mul_i32 s3, s3, 0
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
@@ -1720,9 +1671,7 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX8DAGISEL-NEXT: s_and_b32 s3, s2, 1
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8DAGISEL-NEXT: s_mul_i32 s2, s0, s3
-; GFX8DAGISEL-NEXT: s_mul_i32 s4, s1, s3
-; GFX8DAGISEL-NEXT: s_mul_hi_u32 s3, s0, s3
-; GFX8DAGISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX8DAGISEL-NEXT: s_mul_i32 s3, s1, s3
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s2
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
@@ -1738,9 +1687,7 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX8GISEL-NEXT: s_and_b32 s3, s2, 1
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8GISEL-NEXT: s_mul_i32 s2, s0, s3
-; GFX8GISEL-NEXT: s_mul_i32 s4, s1, s3
-; GFX8GISEL-NEXT: s_mul_hi_u32 s3, s0, s3
-; GFX8GISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX8GISEL-NEXT: s_mul_i32 s3, s1, s3
; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s3
@@ -1757,9 +1704,7 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9DAGISEL-NEXT: s_mul_i32 s2, s0, s3
-; GFX9DAGISEL-NEXT: s_mul_i32 s4, s1, s3
-; GFX9DAGISEL-NEXT: s_mul_hi_u32 s3, s0, s3
-; GFX9DAGISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX9DAGISEL-NEXT: s_mul_i32 s3, s1, s3
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1774,9 +1719,7 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9GISEL-NEXT: s_mul_i32 s2, s0, s3
-; GFX9GISEL-NEXT: s_mul_i32 s4, s1, s3
-; GFX9GISEL-NEXT: s_mul_hi_u32 s3, s0, s3
-; GFX9GISEL-NEXT: s_add_u32 s3, s3, s4
+; GFX9GISEL-NEXT: s_mul_i32 s3, s1, s3
; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1788,12 +1731,10 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1064DAGISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
-; GFX1064DAGISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1064DAGISEL-NEXT: s_and_b32 s3, s2, 1
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s1, s2
-; GFX1064DAGISEL-NEXT: s_mul_hi_u32 s4, s0, s2
-; GFX1064DAGISEL-NEXT: s_mul_i32 s2, s0, s2
-; GFX1064DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1064DAGISEL-NEXT: s_mul_i32 s2, s0, s3
+; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s1, s3
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1805,12 +1746,10 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1064GISEL-NEXT: s_mov_b64 s[2:3], exec
; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
-; GFX1064GISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1064GISEL-NEXT: s_and_b32 s3, s2, 1
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064GISEL-NEXT: s_mul_i32 s3, s1, s2
-; GFX1064GISEL-NEXT: s_mul_hi_u32 s4, s0, s2
-; GFX1064GISEL-NEXT: s_mul_i32 s2, s0, s2
-; GFX1064GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1064GISEL-NEXT: s_mul_i32 s2, s0, s3
+; GFX1064GISEL-NEXT: s_mul_i32 s3, s1, s3
; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX1064GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1822,12 +1761,10 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1032DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
-; GFX1032DAGISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1032DAGISEL-NEXT: s_and_b32 s3, s2, 1
; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032DAGISEL-NEXT: s_mul_i32 s3, s1, s2
-; GFX1032DAGISEL-NEXT: s_mul_hi_u32 s4, s0, s2
-; GFX1032DAGISEL-NEXT: s_mul_i32 s2, s0, s2
-; GFX1032DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1032DAGISEL-NEXT: s_mul_i32 s2, s0, s3
+; GFX1032DAGISEL-NEXT: s_mul_i32 s3, s1, s3
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1839,12 +1776,10 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1032GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
-; GFX1032GISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1032GISEL-NEXT: s_and_b32 s3, s2, 1
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032GISEL-NEXT: s_mul_i32 s3, s1, s2
-; GFX1032GISEL-NEXT: s_mul_hi_u32 s4, s0, s2
-; GFX1032GISEL-NEXT: s_mul_i32 s2, s0, s2
-; GFX1032GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1032GISEL-NEXT: s_mul_i32 s2, s0, s3
+; GFX1032GISEL-NEXT: s_mul_i32 s3, s1, s3
; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX1032GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -1857,12 +1792,10 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164DAGISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1164DAGISEL-NEXT: s_and_b32 s3, s2, 1
; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s1, s2
-; GFX1164DAGISEL-NEXT: s_mul_hi_u32 s4, s0, s2
-; GFX1164DAGISEL-NEXT: s_mul_i32 s2, s0, s2
-; GFX1164DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1164DAGISEL-NEXT: s_mul_i32 s2, s0, s3
+; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s1, s3
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -1875,12 +1808,10 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164GISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1164GISEL-NEXT: s_and_b32 s3, s2, 1
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164GISEL-NEXT: s_mul_i32 s3, s1, s2
-; GFX1164GISEL-NEXT: s_mul_hi_u32 s4, s0, s2
-; GFX1164GISEL-NEXT: s_mul_i32 s2, s0, s2
-; GFX1164GISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1164GISEL-NEXT: s_mul_i32 s2, s0, s3
+; GFX1164GISEL-NEXT: s_mul_i32 s3, s1, s3
; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -1892,12 +1823,10 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s2, s2
-; GFX1132DAGISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1132DAGISEL-NEXT: s_and_b32 s3, s2, 1
; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132DAGISEL-NEXT: s_mul_i32 s3, s1, s2
-; GFX1132DAGISEL-NEXT: s_mul_hi_u32 s4, s0, s2
-; GFX1132DAGISEL-NEXT: s_mul_i32 s2, s0, s2
-; GFX1132DAGISEL-NEXT: s_add_u32 s3, s4, s3
+; GFX1132DAGISEL-NEXT: s_mul_i32 s2, s0, s3
+; GFX1132DAGISEL-NEXT: s_mul_i32 s3, s1, s3
; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, s2
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -1909,14 +1838,11 @@ define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s2, s2
-; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132GISEL-NEXT: s_and_b32 s2, s2, 1
+; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: s_and_b32 s3, s2, 1
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132GISEL-NEXT: s_mul_i32 s3, s1, s2
-; GFX1132GISEL-NEXT: s_mul_hi_u32 s4, s0, s2
-; GFX1132GISEL-NEXT: s_mul_i32 s2, s0, s2
-; GFX1132GISEL-NEXT: s_add_u32 s3, s4, s3
-; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132GISEL-NEXT: s_mul_i32 s2, s0, s3
+; GFX1132GISEL-NEXT: s_mul_i32 s3, s1, s3
; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
@@ -2194,9 +2120,7 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX8DAGISEL-NEXT: s_and_b32 s7, s6, 1
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8DAGISEL-NEXT: s_mul_i32 s6, s2, s7
-; GFX8DAGISEL-NEXT: s_mul_i32 s3, s3, s7
-; GFX8DAGISEL-NEXT: s_mul_hi_u32 s2, s2, s7
-; GFX8DAGISEL-NEXT: s_add_u32 s7, s2, s3
+; GFX8DAGISEL-NEXT: s_mul_i32 s7, s3, s7
; GFX8DAGISEL-NEXT: .LBB9_2: ; %Flow
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[8:9]
@@ -2206,13 +2130,11 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX8DAGISEL-NEXT: ; %bb.3: ; %if
; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX8DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
-; GFX8DAGISEL-NEXT: s_and_b32 s7, s6, 1
-; GFX8DAGISEL-NEXT: s_mul_i32 s6, s4, s7
-; GFX8DAGISEL-NEXT: s_mul_i32 s5, s5, s7
-; GFX8DAGISEL-NEXT: s_mul_hi_u32 s4, s4, s7
-; GFX8DAGISEL-NEXT: s_add_u32 s7, s4, s5
-; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s6
-; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX8DAGISEL-NEXT: s_and_b32 s6, s6, 1
+; GFX8DAGISEL-NEXT: s_mul_i32 s4, s4, s6
+; GFX8DAGISEL-NEXT: s_mul_i32 s5, s5, s6
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s5
; GFX8DAGISEL-NEXT: ; %bb.4: ; %endif
; GFX8DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
@@ -2234,9 +2156,7 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX8GISEL-NEXT: s_and_b32 s7, s6, 1
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8GISEL-NEXT: s_mul_i32 s6, s2, s7
-; GFX8GISEL-NEXT: s_mul_i32 s3, s3, s7
-; GFX8GISEL-NEXT: s_mul_hi_u32 s2, s2, s7
-; GFX8GISEL-NEXT: s_add_u32 s7, s2, s3
+; GFX8GISEL-NEXT: s_mul_i32 s7, s3, s7
; GFX8GISEL-NEXT: .LBB9_2: ; %Flow
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
@@ -2248,9 +2168,7 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX8GISEL-NEXT: s_and_b32 s7, s6, 1
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8GISEL-NEXT: s_mul_i32 s6, s4, s7
-; GFX8GISEL-NEXT: s_mul_i32 s5, s5, s7
-; GFX8GISEL-NEXT: s_mul_hi_u32 s4, s4, s7
-; GFX8GISEL-NEXT: s_add_u32 s7, s4, s5
+; GFX8GISEL-NEXT: s_mul_i32 s7, s5, s7
; GFX8GISEL-NEXT: .LBB9_4: ; %endif
; GFX8GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s6
@@ -2275,9 +2193,7 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX9DAGISEL-NEXT: s_and_b32 s5, s4, 1
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9DAGISEL-NEXT: s_mul_i32 s4, s2, s5
-; GFX9DAGISEL-NEXT: s_mul_i32 s3, s3, s5
-; GFX9DAGISEL-NEXT: s_mul_hi_u32 s2, s2, s5
-; GFX9DAGISEL-NEXT: s_add_u32 s5, s2, s3
+; GFX9DAGISEL-NEXT: s_mul_i32 s5, s3, s5
; GFX9DAGISEL-NEXT: .LBB9_2: ; %Flow
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[8:9]
@@ -2289,9 +2205,7 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX9DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX9DAGISEL-NEXT: s_and_b32 s5, s4, 1
; GFX9DAGISEL-NEXT: s_mul_i32 s4, s6, s5
-; GFX9DAGISEL-NEXT: s_mul_i32 s7, s7, s5
-; GFX9DAGISEL-NEXT: s_mul_hi_u32 s5, s6, s5
-; GFX9DAGISEL-NEXT: s_add_u32 s5, s5, s7
+; GFX9DAGISEL-NEXT: s_mul_i32 s5, s7, s5
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s5
; GFX9DAGISEL-NEXT: ; %bb.4: ; %endif
@@ -2314,23 +2228,19 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX9GISEL-NEXT: s_and_b32 s7, s6, 1
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9GISEL-NEXT: s_mul_i32 s6, s2, s7
-; GFX9GISEL-NEXT: s_mul_i32 s3, s3, s7
-; GFX9GISEL-NEXT: s_mul_hi_u32 s2, s2, s7
-; GFX9GISEL-NEXT: s_add_u32 s7, s2, s3
+; GFX9GISEL-NEXT: s_mul_i32 s7, s3, s7
; GFX9GISEL-NEXT: .LBB9_2: ; %Flow
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_4
; GFX9GISEL-NEXT: ; %bb.3: ; %if
-; GFX9GISEL-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
+; GFX9GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9GISEL-NEXT: s_mov_b64 s[4:5], exec
; GFX9GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX9GISEL-NEXT: s_and_b32 s4, s4, 1
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9GISEL-NEXT: s_mul_i32 s6, s8, s4
-; GFX9GISEL-NEXT: s_mul_i32 s5, s9, s4
-; GFX9GISEL-NEXT: s_mul_hi_u32 s4, s8, s4
-; GFX9GISEL-NEXT: s_add_u32 s7, s4, s5
+; GFX9GISEL-NEXT: s_mul_i32 s6, s6, s4
+; GFX9GISEL-NEXT: s_mul_i32 s7, s7, s4
; GFX9GISEL-NEXT: .LBB9_4: ; %endif
; GFX9GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s6
@@ -2345,33 +2255,29 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX1064DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX1064DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
-; GFX1064DAGISEL-NEXT: ; implicit-def: $sgpr8_sgpr9
-; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX1064DAGISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX1064DAGISEL-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX1064DAGISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB9_2
; GFX1064DAGISEL-NEXT: ; %bb.1: ; %else
-; GFX1064DAGISEL-NEXT: s_mov_b64 s[8:9], exec
-; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s8, s[8:9]
-; GFX1064DAGISEL-NEXT: s_and_b32 s8, s8, 1
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
+; GFX1064DAGISEL-NEXT: s_and_b32 s5, s4, 1
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064DAGISEL-NEXT: s_mul_i32 s3, s3, s8
-; GFX1064DAGISEL-NEXT: s_mul_hi_u32 s9, s2, s8
-; GFX1064DAGISEL-NEXT: s_mul_i32 s8, s2, s8
-; GFX1064DAGISEL-NEXT: s_add_u32 s9, s9, s3
+; GFX1064DAGISEL-NEXT: s_mul_i32 s4, s2, s5
+; GFX1064DAGISEL-NEXT: s_mul_i32 s5, s3, s5
; GFX1064DAGISEL-NEXT: .LBB9_2: ; %Flow
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[4:5]
-; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s8
-; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s9
+; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[8:9]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s5
; GFX1064DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
; GFX1064DAGISEL-NEXT: ; %bb.3: ; %if
; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], exec
; GFX1064DAGISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
-; GFX1064DAGISEL-NEXT: s_and_b32 s4, s4, 1
-; GFX1064DAGISEL-NEXT: s_mul_i32 s5, s7, s4
-; GFX1064DAGISEL-NEXT: s_mul_hi_u32 s7, s6, s4
-; GFX1064DAGISEL-NEXT: s_mul_i32 s4, s6, s4
-; GFX1064DAGISEL-NEXT: s_add_u32 s5, s7, s5
+; GFX1064DAGISEL-NEXT: s_and_b32 s5, s4, 1
+; GFX1064DAGISEL-NEXT: s_mul_i32 s4, s6, s5
+; GFX1064DAGISEL-NEXT: s_mul_i32 s5, s7, s5
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s5
; GFX1064DAGISEL-NEXT: ; %bb.4: ; %endif
@@ -2391,12 +2297,10 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX1064GISEL-NEXT: ; %bb.1: ; %else
; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
-; GFX1064GISEL-NEXT: s_and_b32 s6, s6, 1
+; GFX1064GISEL-NEXT: s_and_b32 s7, s6, 1
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064GISEL-NEXT: s_mul_i32 s3, s3, s6
-; GFX1064GISEL-NEXT: s_mul_hi_u32 s7, s2, s6
-; GFX1064GISEL-NEXT: s_mul_i32 s6, s2, s6
-; GFX1064GISEL-NEXT: s_add_u32 s7, s7, s3
+; GFX1064GISEL-NEXT: s_mul_i32 s6, s2, s7
+; GFX1064GISEL-NEXT: s_mul_i32 s7, s3, s7
; GFX1064GISEL-NEXT: .LBB9_2: ; %Flow
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
@@ -2407,10 +2311,8 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX1064GISEL-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX1064GISEL-NEXT: s_and_b32 s4, s4, 1
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064GISEL-NEXT: s_mul_i32 s5, s7, s4
-; GFX1064GISEL-NEXT: s_mul_hi_u32 s7, s6, s4
; GFX1064GISEL-NEXT: s_mul_i32 s6, s6, s4
-; GFX1064GISEL-NEXT: s_add_u32 s7, s7, s5
+; GFX1064GISEL-NEXT: s_mul_i32 s7, s7, s4
; GFX1064GISEL-NEXT: .LBB9_4: ; %endif
; GFX1064GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s6
@@ -2432,12 +2334,10 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX1032DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1032DAGISEL-NEXT: s_mov_b32 s4, exec_lo
; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s4, s4
-; GFX1032DAGISEL-NEXT: s_and_b32 s4, s4, 1
+; GFX1032DAGISEL-NEXT: s_and_b32 s5, s4, 1
; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032DAGISEL-NEXT: s_mul_i32 s3, s3, s4
-; GFX1032DAGISEL-NEXT: s_mul_hi_u32 s5, s2, s4
-; GFX1032DAGISEL-NEXT: s_mul_i32 s4, s2, s4
-; GFX1032DAGISEL-NEXT: s_add_u32 s5, s5, s3
+; GFX1032DAGISEL-NEXT: s_mul_i32 s4, s2, s5
+; GFX1032DAGISEL-NEXT: s_mul_i32 s5, s3, s5
; GFX1032DAGISEL-NEXT: .LBB9_2: ; %Flow
; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s2, s8
@@ -2448,10 +2348,8 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX1032DAGISEL-NEXT: s_mov_b32 s3, exec_lo
; GFX1032DAGISEL-NEXT: s_bcnt1_i32_b32 s3, s3
; GFX1032DAGISEL-NEXT: s_and_b32 s3, s3, 1
-; GFX1032DAGISEL-NEXT: s_mul_i32 s5, s7, s3
-; GFX1032DAGISEL-NEXT: s_mul_hi_u32 s7, s6, s3
; GFX1032DAGISEL-NEXT: s_mul_i32 s4, s6, s3
-; GFX1032DAGISEL-NEXT: s_add_u32 s5, s7, s5
+; GFX1032DAGISEL-NEXT: s_mul_i32 s5, s7, s3
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s5
; GFX1032DAGISEL-NEXT: ; %bb.4: ; %endif
@@ -2471,12 +2369,10 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX1032GISEL-NEXT: ; %bb.1: ; %else
; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s6, s6
-; GFX1032GISEL-NEXT: s_and_b32 s6, s6, 1
+; GFX1032GISEL-NEXT: s_and_b32 s7, s6, 1
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032GISEL-NEXT: s_mul_i32 s3, s3, s6
-; GFX1032GISEL-NEXT: s_mul_hi_u32 s7, s2, s6
-; GFX1032GISEL-NEXT: s_mul_i32 s6, s2, s6
-; GFX1032GISEL-NEXT: s_add_u32 s7, s7, s3
+; GFX1032GISEL-NEXT: s_mul_i32 s6, s2, s7
+; GFX1032GISEL-NEXT: s_mul_i32 s7, s3, s7
; GFX1032GISEL-NEXT: .LBB9_2: ; %Flow
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s2, s8
@@ -2487,10 +2383,8 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX1032GISEL-NEXT: s_bcnt1_i32_b32 s3, s3
; GFX1032GISEL-NEXT: s_and_b32 s3, s3, 1
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032GISEL-NEXT: s_mul_i32 s4, s7, s3
-; GFX1032GISEL-NEXT: s_mul_hi_u32 s5, s6, s3
; GFX1032GISEL-NEXT: s_mul_i32 s6, s6, s3
-; GFX1032GISEL-NEXT: s_add_u32 s7, s5, s4
+; GFX1032GISEL-NEXT: s_mul_i32 s7, s7, s3
; GFX1032GISEL-NEXT: .LBB9_4: ; %endif
; GFX1032GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s6
@@ -2505,27 +2399,25 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1164DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX1164DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
-; GFX1164DAGISEL-NEXT: ; implicit-def: $sgpr8_sgpr9
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[8:9], exec
+; GFX1164DAGISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
-; GFX1164DAGISEL-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
+; GFX1164DAGISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB9_2
; GFX1164DAGISEL-NEXT: ; %bb.1: ; %else
-; GFX1164DAGISEL-NEXT: s_mov_b64 s[8:9], exec
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s8, s[8:9]
-; GFX1164DAGISEL-NEXT: s_and_b32 s8, s8, 1
+; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1164DAGISEL-NEXT: s_and_b32 s7, s6, 1
; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164DAGISEL-NEXT: s_mul_i32 s3, s3, s8
-; GFX1164DAGISEL-NEXT: s_mul_hi_u32 s9, s2, s8
-; GFX1164DAGISEL-NEXT: s_mul_i32 s8, s2, s8
-; GFX1164DAGISEL-NEXT: s_add_u32 s9, s9, s3
+; GFX1164DAGISEL-NEXT: s_mul_i32 s6, s2, s7
+; GFX1164DAGISEL-NEXT: s_mul_i32 s7, s3, s7
; GFX1164DAGISEL-NEXT: .LBB9_2: ; %Flow
; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[6:7]
-; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s8
-; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s9
+; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[8:9]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s7
; GFX1164DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3]
; GFX1164DAGISEL-NEXT: ; %bb.3: ; %if
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
@@ -2533,10 +2425,8 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX1164DAGISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164DAGISEL-NEXT: s_and_b32 s6, s6, 1
-; GFX1164DAGISEL-NEXT: s_mul_i32 s5, s5, s6
-; GFX1164DAGISEL-NEXT: s_mul_hi_u32 s7, s4, s6
; GFX1164DAGISEL-NEXT: s_mul_i32 s4, s4, s6
-; GFX1164DAGISEL-NEXT: s_add_u32 s5, s7, s5
+; GFX1164DAGISEL-NEXT: s_mul_i32 s5, s5, s6
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s4
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s5
; GFX1164DAGISEL-NEXT: ; %bb.4: ; %endif
@@ -2559,12 +2449,10 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
-; GFX1164GISEL-NEXT: s_and_b32 s6, s6, 1
+; GFX1164GISEL-NEXT: s_and_b32 s7, s6, 1
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164GISEL-NEXT: s_mul_i32 s3, s3, s6
-; GFX1164GISEL-NEXT: s_mul_hi_u32 s7, s2, s6
-; GFX1164GISEL-NEXT: s_mul_i32 s6, s2, s6
-; GFX1164GISEL-NEXT: s_add_u32 s7, s7, s3
+; GFX1164GISEL-NEXT: s_mul_i32 s6, s2, s7
+; GFX1164GISEL-NEXT: s_mul_i32 s7, s3, s7
; GFX1164GISEL-NEXT: .LBB9_2: ; %Flow
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[2:3], s[8:9]
@@ -2574,12 +2462,10 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], exec
; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164GISEL-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
-; GFX1164GISEL-NEXT: s_and_b32 s6, s6, 1
+; GFX1164GISEL-NEXT: s_and_b32 s7, s6, 1
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164GISEL-NEXT: s_mul_i32 s5, s5, s6
-; GFX1164GISEL-NEXT: s_mul_hi_u32 s7, s4, s6
-; GFX1164GISEL-NEXT: s_mul_i32 s6, s4, s6
-; GFX1164GISEL-NEXT: s_add_u32 s7, s7, s5
+; GFX1164GISEL-NEXT: s_mul_i32 s6, s4, s7
+; GFX1164GISEL-NEXT: s_mul_i32 s7, s5, s7
; GFX1164GISEL-NEXT: .LBB9_4: ; %endif
; GFX1164GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s6
@@ -2604,12 +2490,10 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX1132DAGISEL-NEXT: s_mov_b32 s6, exec_lo
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s6, s6
-; GFX1132DAGISEL-NEXT: s_and_b32 s6, s6, 1
+; GFX1132DAGISEL-NEXT: s_and_b32 s7, s6, 1
; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132DAGISEL-NEXT: s_mul_i32 s3, s3, s6
-; GFX1132DAGISEL-NEXT: s_mul_hi_u32 s7, s2, s6
-; GFX1132DAGISEL-NEXT: s_mul_i32 s6, s2, s6
-; GFX1132DAGISEL-NEXT: s_add_u32 s7, s7, s3
+; GFX1132DAGISEL-NEXT: s_mul_i32 s6, s2, s7
+; GFX1132DAGISEL-NEXT: s_mul_i32 s7, s3, s7
; GFX1132DAGISEL-NEXT: .LBB9_2: ; %Flow
; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s2, s8
@@ -2621,10 +2505,8 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX1132DAGISEL-NEXT: s_bcnt1_i32_b32 s3, s3
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1132DAGISEL-NEXT: s_and_b32 s3, s3, 1
-; GFX1132DAGISEL-NEXT: s_mul_i32 s5, s5, s3
-; GFX1132DAGISEL-NEXT: s_mul_hi_u32 s6, s4, s3
; GFX1132DAGISEL-NEXT: s_mul_i32 s4, s4, s3
-; GFX1132DAGISEL-NEXT: s_add_u32 s5, s6, s5
+; GFX1132DAGISEL-NEXT: s_mul_i32 s5, s5, s3
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX1132DAGISEL-NEXT: ; %bb.4: ; %endif
@@ -2647,12 +2529,10 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX1132GISEL-NEXT: s_mov_b32 s6, exec_lo
; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s6, s6
-; GFX1132GISEL-NEXT: s_and_b32 s6, s6, 1
+; GFX1132GISEL-NEXT: s_and_b32 s7, s6, 1
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132GISEL-NEXT: s_mul_i32 s3, s3, s6
-; GFX1132GISEL-NEXT: s_mul_hi_u32 s7, s2, s6
-; GFX1132GISEL-NEXT: s_mul_i32 s6, s2, s6
-; GFX1132GISEL-NEXT: s_add_u32 s7, s7, s3
+; GFX1132GISEL-NEXT: s_mul_i32 s6, s2, s7
+; GFX1132GISEL-NEXT: s_mul_i32 s7, s3, s7
; GFX1132GISEL-NEXT: .LBB9_2: ; %Flow
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s2, s8
@@ -2664,10 +2544,8 @@ define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64
; GFX1132GISEL-NEXT: s_bcnt1_i32_b32 s3, s3
; GFX1132GISEL-NEXT: s_and_b32 s3, s3, 1
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132GISEL-NEXT: s_mul_i32 s5, s5, s3
-; GFX1132GISEL-NEXT: s_mul_hi_u32 s7, s4, s3
; GFX1132GISEL-NEXT: s_mul_i32 s6, s4, s3
-; GFX1132GISEL-NEXT: s_add_u32 s7, s7, s5
+; GFX1132GISEL-NEXT: s_mul_i32 s7, s5, s3
; GFX1132GISEL-NEXT: .LBB9_4: ; %endif
; GFX1132GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
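
[Editorial note, not part of the patch] The updated CHECK lines above all follow the same pattern: the previous `s_mul_i32`/`s_mul_hi_u32`/`s_add_u32` sequence is replaced by two independent `s_mul_i32` instructions. The multiplier here is the parity mask produced by `s_and_b32 ..., 1`, which is always 0 or 1, so the low-half multiply can never carry into the high half and the high-multiply/add pair is dead weight. A minimal scalar sketch of what the lowered i64 XOR reduction computes for a wave-uniform input; `Input` and `ActiveLanes` are illustrative names, not identifiers from the patch:

```cpp
#include <cstdint>

// Scalar model of the lowered sequence: parity bit times each 32-bit half.
uint64_t waveReduceXorUniform(uint64_t Input, unsigned ActiveLanes) {
  uint32_t Parity = ActiveLanes & 1;            // s_and_b32 ..., 1
  uint32_t Lo = uint32_t(Input) * Parity;       // s_mul_i32 on the low half
  uint32_t Hi = uint32_t(Input >> 32) * Parity; // s_mul_i32 on the high half
  // Parity is 0 or 1, so the low-half multiply cannot carry into the high
  // half; no s_mul_hi_u32 / s_add_u32 pair is needed before recombining.
  return (uint64_t(Hi) << 32) | Lo;             // REG_SEQUENCE sub0/sub1
}
```

For example, with three active lanes the parity bit is 1 and the reduction returns the input unchanged; with four active lanes it is 0 and the reduction returns 0.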
>From aadbecbec8d32966c7cc29fb52124f89b9bc9daa Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Thu, 7 Aug 2025 11:38:46 +0530
Subject: [PATCH 3/4] Removing `break` before `else`
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 65606a8427b1d..7e4eb52c88f38 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5200,7 +5200,6 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DstReg)
.addReg(SrcReg)
.addReg(ParityRegister);
- break;
} else {
Register DestSub0 =
MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
@@ -5229,8 +5228,8 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
.addImm(AMDGPU::sub0)
.addReg(DestSub1)
.addImm(AMDGPU::sub1);
- break;
}
+ break;
}
case AMDGPU::S_SUB_I32: {
Register NegatedVal = MRI.createVirtualRegister(DstRegClass);
>From 49b080f2bc1fdad3d2ba77b5ee7748cfbf1b96b0 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Thu, 7 Aug 2025 11:55:38 +0530
Subject: [PATCH 4/4] Running Clang Format
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 98 +++++++++++------------
1 file changed, 49 insertions(+), 49 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 7e4eb52c88f38..fb56d8e3d2f97 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5182,55 +5182,55 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
BuildMI(BB, MI, DL, TII->get(BitCountOpc), NumActiveLanes)
.addReg(ExecMask);
- switch (Opc) {
- case AMDGPU::S_XOR_B32:
- case AMDGPU::S_XOR_B64: {
- // Performing an XOR operation on a uniform value
- // depends on the parity of the number of active lanes.
- // For even parity, the result will be 0, for odd
- // parity the result will be the same as the input value.
- Register ParityRegister =
- MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
-
- BuildMI(BB, MI, DL, TII->get(AMDGPU::S_AND_B32), ParityRegister)
- .addReg(NewAccumulator->getOperand(0).getReg())
- .addImm(1)
- .setOperandDead(3); // Dead scc
- if (is32BitOpc) {
- BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DstReg)
- .addReg(SrcReg)
- .addReg(ParityRegister);
- } else {
- Register DestSub0 =
- MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
- Register DestSub1 =
- MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
-
- const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
- const TargetRegisterClass *SrcSubRC =
- TRI->getSubRegisterClass(SrcRC, AMDGPU::sub0);
-
- MachineOperand Op1L = TII->buildExtractSubRegOrImm(
- MI, MRI, MI.getOperand(1), SrcRC, AMDGPU::sub0, SrcSubRC);
- MachineOperand Op1H = TII->buildExtractSubRegOrImm(
- MI, MRI, MI.getOperand(1), SrcRC, AMDGPU::sub1, SrcSubRC);
-
- BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DestSub0)
- .add(Op1L)
- .addReg(ParityRegister);
-
- BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DestSub1)
- .add(Op1H)
- .addReg(ParityRegister);
-
- BuildMI(BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), DstReg)
- .addReg(DestSub0)
- .addImm(AMDGPU::sub0)
- .addReg(DestSub1)
- .addImm(AMDGPU::sub1);
- }
- break;
- }
+ switch (Opc) {
+ case AMDGPU::S_XOR_B32:
+ case AMDGPU::S_XOR_B64: {
+ // Performing an XOR operation on a uniform value
+ // depends on the parity of the number of active lanes.
+ // For even parity, the result will be 0, for odd
+ // parity the result will be the same as the input value.
+ Register ParityRegister =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_AND_B32), ParityRegister)
+ .addReg(NewAccumulator->getOperand(0).getReg())
+ .addImm(1)
+ .setOperandDead(3); // Dead scc
+ if (is32BitOpc) {
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DstReg)
+ .addReg(SrcReg)
+ .addReg(ParityRegister);
+ } else {
+ Register DestSub0 =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ Register DestSub1 =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+
+ const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
+ const TargetRegisterClass *SrcSubRC =
+ TRI->getSubRegisterClass(SrcRC, AMDGPU::sub0);
+
+ MachineOperand Op1L = TII->buildExtractSubRegOrImm(
+ MI, MRI, MI.getOperand(1), SrcRC, AMDGPU::sub0, SrcSubRC);
+ MachineOperand Op1H = TII->buildExtractSubRegOrImm(
+ MI, MRI, MI.getOperand(1), SrcRC, AMDGPU::sub1, SrcSubRC);
+
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DestSub0)
+ .add(Op1L)
+ .addReg(ParityRegister);
+
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MUL_I32), DestSub1)
+ .add(Op1H)
+ .addReg(ParityRegister);
+
+ BuildMI(BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), DstReg)
+ .addReg(DestSub0)
+ .addImm(AMDGPU::sub0)
+ .addReg(DestSub1)
+ .addImm(AMDGPU::sub1);
+ }
+ break;
+ }
case AMDGPU::S_SUB_I32: {
Register NegatedVal = MRI.createVirtualRegister(DstRegClass);
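
[Editorial note, not part of the patch] The comment re-indented in the hunk above relies on the parity property of XOR: folding the same value across N active lanes yields the value for odd N and 0 for even N. That property can be sanity-checked with a few lines of stand-alone C++ (an editorial sketch; the constant and loop bounds are arbitrary):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t Val = 0xDEADBEEFCAFEF00DULL; // arbitrary uniform lane value
  for (unsigned Lanes = 0; Lanes <= 64; ++Lanes) {
    uint64_t Acc = 0;
    for (unsigned L = 0; L < Lanes; ++L)
      Acc ^= Val;                              // naive per-lane reduction
    uint64_t Expected = (Lanes & 1) ? Val : 0; // parity shortcut used above
    assert(Acc == Expected);
  }
  return 0;
}
```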