[llvm] [AMDGPU] Extending wave reduction intrinsics for `i64` types - 1 (PR #150169)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 30 04:27:30 PDT 2025
https://github.com/easyonaadit updated https://github.com/llvm/llvm-project/pull/150169
From d2db1c36f0219655a7cb86d95d8310b46c3a9a58 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Sat, 19 Jul 2025 12:48:18 +0530
Subject: [PATCH] [AMDGPU] Extending wave reduction intrinsics for `i64` types
- 1
Supports the min/max operations: `min`, `max`, `umin`, and `umax`.
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 177 +++-
llvm/lib/Target/AMDGPU/SIInstructions.td | 44 +-
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll | 2 +-
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll | 900 ++++++++++++++++++
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll | 900 ++++++++++++++++++
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll | 670 +++++++++++++
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll | 862 +++++++++++++++++
7 files changed, 3511 insertions(+), 44 deletions(-)
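
For context, here is a minimal IR sketch of how the extended i64 overloads are invoked, mirroring the new tests added by this patch; the kernel name below is illustrative only, and the second operand is the same strategy hint (held at 1) used throughout the tests.

define amdgpu_kernel void @example_reduce_max_i64(ptr addrspace(1) %out, i64 %in) {
entry:
  ; Reduce %in across all active lanes of the wave using the new i64 overload.
  %result = call i64 @llvm.amdgcn.wave.reduce.max.i64(i64 %in, i32 1)
  store i64 %result, ptr addrspace(1) %out
  ret void
}
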
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index d65c3ae76566b..0f529ef362199 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5095,12 +5095,16 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
static uint32_t getIdentityValueForWaveReduction(unsigned Opc) {
switch (Opc) {
case AMDGPU::S_MIN_U32:
+ case AMDGPU::V_CMP_LT_U64_e64: // umin.u64
return std::numeric_limits<uint32_t>::max();
case AMDGPU::S_MIN_I32:
+ case AMDGPU::V_CMP_LT_I64_e64: // min.i64
return std::numeric_limits<int32_t>::max();
case AMDGPU::S_MAX_U32:
+ case AMDGPU::V_CMP_GT_U64_e64: // umax.u64
return std::numeric_limits<uint32_t>::min();
case AMDGPU::S_MAX_I32:
+ case AMDGPU::V_CMP_GT_I64_e64: // max.i64
return std::numeric_limits<int32_t>::min();
case AMDGPU::S_ADD_I32:
case AMDGPU::S_SUB_I32:
@@ -5128,16 +5132,22 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
bool isSGPR = TRI->isSGPRClass(MRI.getRegClass(SrcReg));
Register DstReg = MI.getOperand(0).getReg();
MachineBasicBlock *RetBB = nullptr;
+ bool is32BitOpc = TRI->getRegSizeInBits(*MRI.getRegClass(DstReg)) == 32;
if (isSGPR) {
switch (Opc) {
case AMDGPU::S_MIN_U32:
+ case AMDGPU::V_CMP_LT_U64_e64: /*umin*/
case AMDGPU::S_MIN_I32:
+ case AMDGPU::V_CMP_LT_I64_e64: /*min*/
case AMDGPU::S_MAX_U32:
+ case AMDGPU::V_CMP_GT_U64_e64: /*umax*/
case AMDGPU::S_MAX_I32:
+ case AMDGPU::V_CMP_GT_I64_e64: /*max*/
case AMDGPU::S_AND_B32:
case AMDGPU::S_OR_B32: {
// Idempotent operations.
- BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B32), DstReg).addReg(SrcReg);
+ unsigned movOpc = is32BitOpc ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
+ BuildMI(BB, MI, DL, TII->get(movOpc), DstReg).addReg(SrcReg);
RetBB = &BB;
break;
}
@@ -5222,73 +5232,166 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
const TargetRegisterClass *WaveMaskRegClass = TRI->getWaveMaskRegClass();
const TargetRegisterClass *DstRegClass = MRI.getRegClass(DstReg);
Register LoopIterator = MRI.createVirtualRegister(WaveMaskRegClass);
- Register InitalValReg = MRI.createVirtualRegister(DstRegClass);
-
+ Register IdentityValReg = MRI.createVirtualRegister(DstRegClass);
Register AccumulatorReg = MRI.createVirtualRegister(DstRegClass);
Register ActiveBitsReg = MRI.createVirtualRegister(WaveMaskRegClass);
Register NewActiveBitsReg = MRI.createVirtualRegister(WaveMaskRegClass);
-
- Register FF1Reg = MRI.createVirtualRegister(DstRegClass);
- Register LaneValueReg =
- MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
+ Register FF1Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ Register LaneValueReg = MRI.createVirtualRegister(DstRegClass);
bool IsWave32 = ST.isWave32();
- unsigned MovOpc = IsWave32 ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
+ unsigned MovOpcForExec = IsWave32 ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
unsigned ExecReg = IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
// Create initial values of induction variable from Exec, Accumulator and
// insert branch instr to newly created ComputeBlock
- uint32_t InitalValue = getIdentityValueForWaveReduction(Opc);
- auto TmpSReg =
- BuildMI(BB, I, DL, TII->get(MovOpc), LoopIterator).addReg(ExecReg);
- BuildMI(BB, I, DL, TII->get(AMDGPU::S_MOV_B32), InitalValReg)
- .addImm(InitalValue);
+ uint32_t IdentityValue = getIdentityValueForWaveReduction(Opc);
+ BuildMI(BB, I, DL, TII->get(MovOpcForExec), LoopIterator).addReg(ExecReg);
+ if (is32BitOpc) {
+ BuildMI(BB, I, DL, TII->get(AMDGPU::S_MOV_B32), IdentityValReg)
+ .addImm(IdentityValue);
+ } else {
+ Register Identitylo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ Register Identityhi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ BuildMI(BB, I, DL, TII->get(AMDGPU::S_MOV_B32), Identityhi)
+ .addImm(IdentityValue);
+ switch (Opc) {
+ case AMDGPU::V_CMP_LT_U64_e64:
+ case AMDGPU::V_CMP_LT_I64_e64:
+ IdentityValue = int32_t(-1); // u|min
+ break;
+ case AMDGPU::V_CMP_GT_U64_e64:
+ case AMDGPU::V_CMP_GT_I64_e64:
+ IdentityValue = int32_t(0); // u|max
+ break;
+ }
+ BuildMI(BB, I, DL, TII->get(AMDGPU::S_MOV_B32), Identitylo)
+ .addImm(IdentityValue);
+ BuildMI(BB, I, DL, TII->get(TargetOpcode::REG_SEQUENCE), IdentityValReg)
+ .addReg(Identitylo)
+ .addImm(AMDGPU::sub0)
+ .addReg(Identityhi)
+ .addImm(AMDGPU::sub1);
+ }
// clang-format off
BuildMI(BB, I, DL, TII->get(AMDGPU::S_BRANCH))
.addMBB(ComputeLoop);
// clang-format on
// Start constructing ComputeLoop
- I = ComputeLoop->end();
+ I = ComputeLoop->begin();
auto Accumulator =
BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::PHI), AccumulatorReg)
- .addReg(InitalValReg)
+ .addReg(IdentityValReg)
.addMBB(&BB);
auto ActiveBits =
BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::PHI), ActiveBitsReg)
- .addReg(TmpSReg->getOperand(0).getReg())
+ .addReg(LoopIterator)
.addMBB(&BB);
+ I = ComputeLoop->end();
+ MachineInstr *NewAccumulator;
// Perform the computations
unsigned SFFOpc = IsWave32 ? AMDGPU::S_FF1_I32_B32 : AMDGPU::S_FF1_I32_B64;
- auto FF1 = BuildMI(*ComputeLoop, I, DL, TII->get(SFFOpc), FF1Reg)
- .addReg(ActiveBits->getOperand(0).getReg());
- auto LaneValue = BuildMI(*ComputeLoop, I, DL,
- TII->get(AMDGPU::V_READLANE_B32), LaneValueReg)
- .addReg(SrcReg)
- .addReg(FF1->getOperand(0).getReg());
- auto NewAccumulator = BuildMI(*ComputeLoop, I, DL, TII->get(Opc), DstReg)
- .addReg(Accumulator->getOperand(0).getReg())
- .addReg(LaneValue->getOperand(0).getReg());
-
+ BuildMI(*ComputeLoop, I, DL, TII->get(SFFOpc), FF1Reg)
+ .addReg(ActiveBitsReg);
+ if (is32BitOpc) {
+ BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_READLANE_B32),
+ LaneValueReg)
+ .addReg(SrcReg)
+ .addReg(FF1Reg);
+ NewAccumulator = BuildMI(*ComputeLoop, I, DL, TII->get(Opc), DstReg)
+ .addReg(Accumulator->getOperand(0).getReg())
+ .addReg(LaneValueReg);
+ } else {
+ Register LaneValueLoReg =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
+ Register LaneValueHiReg =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
+ Register LaneValReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
+ const TargetRegisterClass *SrcSubRC =
+ TRI->getSubRegisterClass(SrcRC, AMDGPU::sub0);
+ MachineOperand Op1L = TII->buildExtractSubRegOrImm(
+ MI, MRI, MI.getOperand(1), SrcRC, AMDGPU::sub0, SrcSubRC);
+ MachineOperand Op1H = TII->buildExtractSubRegOrImm(
+ MI, MRI, MI.getOperand(1), SrcRC, AMDGPU::sub1, SrcSubRC);
+ // lane value input should be in an sgpr
+ MachineInstr *LaneValueLo =
+ BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_READLANE_B32),
+ LaneValueLoReg)
+ .add(Op1L)
+ .addReg(FF1Reg);
+ MachineInstr *LaneValueHi =
+ BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_READLANE_B32),
+ LaneValueHiReg)
+ .add(Op1H)
+ .addReg(FF1Reg);
+ auto LaneValue = BuildMI(*ComputeLoop, I, DL,
+ TII->get(TargetOpcode::REG_SEQUENCE), LaneValReg)
+ .addReg(LaneValueLoReg)
+ .addImm(AMDGPU::sub0)
+ .addReg(LaneValueHiReg)
+ .addImm(AMDGPU::sub1);
+ switch (Opc) {
+ case AMDGPU::V_CMP_GT_I64_e64:
+ case AMDGPU::V_CMP_GT_U64_e64:
+ case AMDGPU::V_CMP_LT_I64_e64:
+ case AMDGPU::V_CMP_LT_U64_e64: {
+ Register LaneMaskReg = MRI.createVirtualRegister(WaveMaskRegClass);
+ Register ComparisonResultReg =
+ MRI.createVirtualRegister(WaveMaskRegClass);
+ const TargetRegisterClass *VregClass =
+ ST.needsAlignedVGPRs() ? &AMDGPU::VReg_64_Align2RegClass
+ : &AMDGPU::VReg_64RegClass;
+ const TargetRegisterClass *VSubRegClass =
+ TRI->getSubRegisterClass(VregClass, AMDGPU::sub0);
+ Register AccumulatorVReg = MRI.createVirtualRegister(VregClass);
+ MachineOperand SrcReg0Sub0 =
+ TII->buildExtractSubRegOrImm(MI, MRI, Accumulator->getOperand(0),
+ VregClass, AMDGPU::sub0, VSubRegClass);
+ MachineOperand SrcReg0Sub1 =
+ TII->buildExtractSubRegOrImm(MI, MRI, Accumulator->getOperand(0),
+ VregClass, AMDGPU::sub1, VSubRegClass);
+ BuildMI(*ComputeLoop, I, DL, TII->get(TargetOpcode::REG_SEQUENCE),
+ AccumulatorVReg)
+ .add(SrcReg0Sub0)
+ .addImm(AMDGPU::sub0)
+ .add(SrcReg0Sub1)
+ .addImm(AMDGPU::sub1);
+ BuildMI(*ComputeLoop, I, DL, TII->get(Opc), LaneMaskReg)
+ .addReg(LaneValue->getOperand(0).getReg())
+ .addReg(AccumulatorVReg);
+
+ unsigned AndOpc = IsWave32 ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
+ BuildMI(*ComputeLoop, I, DL, TII->get(AndOpc), ComparisonResultReg)
+ .addReg(LaneMaskReg)
+ .addReg(ActiveBitsReg);
+
+ NewAccumulator = BuildMI(*ComputeLoop, I, DL,
+ TII->get(AMDGPU::S_CSELECT_B64), DstReg)
+ .addReg(LaneValue->getOperand(0).getReg())
+ .addReg(Accumulator->getOperand(0).getReg());
+ break;
+ }
+ }
+ }
// Manipulate the iterator to get the next active lane
unsigned BITSETOpc =
IsWave32 ? AMDGPU::S_BITSET0_B32 : AMDGPU::S_BITSET0_B64;
- auto NewActiveBits =
- BuildMI(*ComputeLoop, I, DL, TII->get(BITSETOpc), NewActiveBitsReg)
- .addReg(FF1->getOperand(0).getReg())
- .addReg(ActiveBits->getOperand(0).getReg());
+ BuildMI(*ComputeLoop, I, DL, TII->get(BITSETOpc), NewActiveBitsReg)
+ .addReg(FF1Reg)
+ .addReg(ActiveBitsReg);
// Add phi nodes
Accumulator.addReg(NewAccumulator->getOperand(0).getReg())
.addMBB(ComputeLoop);
- ActiveBits.addReg(NewActiveBits->getOperand(0).getReg())
- .addMBB(ComputeLoop);
+ ActiveBits.addReg(NewActiveBitsReg).addMBB(ComputeLoop);
// Creating branching
unsigned CMPOpc = IsWave32 ? AMDGPU::S_CMP_LG_U32 : AMDGPU::S_CMP_LG_U64;
BuildMI(*ComputeLoop, I, DL, TII->get(CMPOpc))
- .addReg(NewActiveBits->getOperand(0).getReg())
+ .addReg(NewActiveBitsReg)
.addImm(0);
BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
.addMBB(ComputeLoop);
@@ -5310,12 +5413,20 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
switch (MI.getOpcode()) {
case AMDGPU::WAVE_REDUCE_UMIN_PSEUDO_U32:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_MIN_U32);
+ case AMDGPU::WAVE_REDUCE_UMIN_PSEUDO_U64:
+ return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_CMP_LT_U64_e64);
case AMDGPU::WAVE_REDUCE_MIN_PSEUDO_I32:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_MIN_I32);
+ case AMDGPU::WAVE_REDUCE_MIN_PSEUDO_I64:
+ return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_CMP_LT_I64_e64);
case AMDGPU::WAVE_REDUCE_UMAX_PSEUDO_U32:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_MAX_U32);
+ case AMDGPU::WAVE_REDUCE_UMAX_PSEUDO_U64:
+ return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_CMP_GT_U64_e64);
case AMDGPU::WAVE_REDUCE_MAX_PSEUDO_I32:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_MAX_I32);
+ case AMDGPU::WAVE_REDUCE_MAX_PSEUDO_I64:
+ return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_CMP_GT_I64_e64);
case AMDGPU::WAVE_REDUCE_ADD_PSEUDO_I32:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_ADD_I32);
case AMDGPU::WAVE_REDUCE_SUB_PSEUDO_I32:
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index d05be8f95c618..25a491ab87a06 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -304,28 +304,52 @@ def : GCNPat<(i32 (int_amdgcn_set_inactive_chain_arg i32:$src, i32:$inactive)),
(V_SET_INACTIVE_B32 0, VGPR_32:$src, 0, VGPR_32:$inactive, (IMPLICIT_DEF))>;
// clang-format off
-defvar int_amdgcn_wave_reduce_ = "int_amdgcn_wave_reduce_";
+
multiclass
- AMDGPUWaveReducePseudoGenerator<string Op, string DataType> {
+ AMDGPUWaveReducePseudoGenerator<string Op, string DataType, ValueType ty, RegisterClass RetReg, SrcRegOrImm9 Reg> {
let usesCustomInserter = 1, hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] in {
def !toupper(Op) #"_PSEUDO_" #DataType
- : VPseudoInstSI<(outs SGPR_32 : $sdst),
- (ins VSrc_b32 : $src, VSrc_b32 : $strategy),
- [(set i32 : $sdst, (!cast<AMDGPUWaveReduce>(int_amdgcn_wave_reduce_ #Op) i32 : $src, i32 : $strategy))]> {}
+ : VPseudoInstSI<(outs RetReg : $sdst),
+ (ins Reg : $src, VSrc_b32 : $strategy),
+ [(set ty : $sdst, (!cast<AMDGPUWaveReduce>("int_amdgcn_wave_reduce_" #Op) ty : $src, i32 : $strategy))]> {}
}
}
// clang-format on
+class WaveReduceOp<string OpName, string TypeStr, ValueType Ty,
+ RegisterClass ReturnRegisterClass, SrcRegOrImm9 RC> {
+ string Name = OpName;
+ string TypeString = TypeStr;
+ ValueType VT = Ty;
+ RegisterClass RetReg = ReturnRegisterClass;
+ SrcRegOrImm9 Reg = RC;
+}
+
// Input list : [Operation_name,
-// type - Signed(I)/Unsigned(U)/Float(F)/Bitwise(B)]
+// type - Signed(I)/Unsigned(U)/Float(F)/Bitwise(B),
+// bit-width
+// output register class,
+// input register class]
defvar Operations = [
- ["umin", "U32"], ["min", "I32"], ["umax", "U32"], ["max", "I32"],
- ["add", "I32"], ["sub", "I32"], ["and", "B32"], ["or", "B32"],
- ["xor", "B32"]
+ WaveReduceOp<"umin", "U32", i32, SGPR_32, VSrc_b32>,
+ WaveReduceOp<"min", "I32", i32, SGPR_32, VSrc_b32>,
+ WaveReduceOp<"umax", "U32", i32, SGPR_32, VSrc_b32>,
+ WaveReduceOp<"max", "I32", i32, SGPR_32, VSrc_b32>,
+ WaveReduceOp<"add", "I32", i32, SGPR_32, VSrc_b32>,
+ WaveReduceOp<"sub", "I32", i32, SGPR_32, VSrc_b32>,
+ WaveReduceOp<"and", "B32", i32, SGPR_32, VSrc_b32>,
+ WaveReduceOp<"or", "B32", i32, SGPR_32, VSrc_b32>,
+ WaveReduceOp<"xor", "B32", i32, SGPR_32, VSrc_b32>,
+
+ WaveReduceOp<"umin", "U64", i64, SGPR_64, VSrc_b64>,
+ WaveReduceOp<"min", "I64", i64, SGPR_64, VSrc_b64>,
+ WaveReduceOp<"umax", "U64", i64, SGPR_64, VSrc_b64>,
+ WaveReduceOp<"max", "I64", i64, SGPR_64, VSrc_b64>,
];
foreach Op = Operations in {
- defm WAVE_REDUCE_ : AMDGPUWaveReducePseudoGenerator<Op[0], Op[1]>;
+ defm WAVE_REDUCE_ : AMDGPUWaveReducePseudoGenerator<Op.Name, Op.TypeString,
+ Op.VT, Op.RetReg, Op.Reg>;
}
let usesCustomInserter = 1, Defs = [VCC] in {
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
index d2ca1d8136043..7c26e3e45c8e4 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.add.ll
@@ -1230,4 +1230,4 @@ endif:
; GFX10DAGISEL: {{.*}}
; GFX10GISEL: {{.*}}
; GFX11DAGISEL: {{.*}}
-; GFX11GISEL: {{.*}}
+; GFX11GISEL: {{.*}}
\ No newline at end of file
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
index 7dc0cb05b0abe..96b67e71fcd28 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.max.ll
@@ -980,3 +980,903 @@ endif:
store i32 %combine, ptr addrspace(1) %out
ret void
}
+
+define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: uniform_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: uniform_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: uniform_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: uniform_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: uniform_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX10DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: uniform_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX10GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX10GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: uniform_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: uniform_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: uniform_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: uniform_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.max.i64(i64 %in, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
+; GFX8DAGISEL-LABEL: const_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: const_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: const_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: const_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: const_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: const_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX10GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX10GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: const_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: const_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: const_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: const_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.max.i64(i64 123, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: poison_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: poison_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: poison_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: poison_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: poison_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT: global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: poison_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT: global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX11DAGISEL-LABEL: poison_value_i64:
+; GFX11DAGISEL: ; %bb.0: ; %entry
+; GFX11DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX11DAGISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX11DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11DAGISEL-NEXT: global_store_b64 v0, v[0:1], s[0:1]
+; GFX11DAGISEL-NEXT: s_endpgm
+;
+; GFX11GISEL-LABEL: poison_value_i64:
+; GFX11GISEL: ; %bb.0: ; %entry
+; GFX11GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX11GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX11GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11GISEL-NEXT: global_store_b64 v0, v[0:1], s[0:1]
+; GFX11GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.max.i64(i64 poison, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
+; GFX8DAGISEL-LABEL: divergent_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_brev_b32 s5, 1
+; GFX8DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX8DAGISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX8DAGISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX8DAGISEL-NEXT: v_cmp_gt_i64_e32 vcc, s[8:9], v[4:5]
+; GFX8DAGISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX8DAGISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX8DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX8DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX8DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX8DAGISEL-NEXT: ; %bb.2:
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8GISEL-LABEL: divergent_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: s_brev_b32 s5, 1
+; GFX8GISEL-NEXT: s_mov_b32 s4, 0
+; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX8GISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX8GISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX8GISEL-NEXT: v_cmp_gt_i64_e32 vcc, s[8:9], v[4:5]
+; GFX8GISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX8GISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX8GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX8GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX8GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX8GISEL-NEXT: ; %bb.2:
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9DAGISEL-LABEL: divergent_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT: s_brev_b32 s5, 1
+; GFX9DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX9DAGISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX9DAGISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX9DAGISEL-NEXT: v_cmp_gt_i64_e32 vcc, s[8:9], v[4:5]
+; GFX9DAGISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX9DAGISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX9DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX9DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX9DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX9DAGISEL-NEXT: ; %bb.2:
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9GISEL-LABEL: divergent_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT: s_brev_b32 s5, 1
+; GFX9GISEL-NEXT: s_mov_b32 s4, 0
+; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX9GISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX9GISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX9GISEL-NEXT: v_cmp_gt_i64_e32 vcc, s[8:9], v[4:5]
+; GFX9GISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX9GISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX9GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX9GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX9GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX9GISEL-NEXT: ; %bb.2:
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX9GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1064DAGISEL-LABEL: divergent_value_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT: s_brev_b32 s5, 1
+; GFX1064DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX1064DAGISEL-NEXT: v_cmp_gt_i64_e32 vcc, s[8:9], v[4:5]
+; GFX1064DAGISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX1064DAGISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX1064DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1064DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX1064DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1064DAGISEL-NEXT: ; %bb.2:
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1064GISEL-LABEL: divergent_value_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT: s_brev_b32 s5, 1
+; GFX1064GISEL-NEXT: s_mov_b32 s4, 0
+; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX1064GISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX1064GISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX1064GISEL-NEXT: v_cmp_gt_i64_e32 vcc, s[8:9], v[4:5]
+; GFX1064GISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX1064GISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX1064GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1064GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX1064GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1064GISEL-NEXT: ; %bb.2:
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1064GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032DAGISEL-LABEL: divergent_value_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT: s_brev_b32 s5, 1
+; GFX1032DAGISEL-NEXT: s_mov_b32 s4, 0
+; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s8, v2, s7
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s9, v3, s7
+; GFX1032DAGISEL-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[8:9], v[4:5]
+; GFX1032DAGISEL-NEXT: s_and_b32 s10, vcc_lo, s6
+; GFX1032DAGISEL-NEXT: s_bitset0_b32 s6, s7
+; GFX1032DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1032DAGISEL-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1032DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1032DAGISEL-NEXT: ; %bb.2:
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032GISEL-LABEL: divergent_value_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT: s_brev_b32 s5, 1
+; GFX1032GISEL-NEXT: s_mov_b32 s4, 0
+; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX1032GISEL-NEXT: v_readlane_b32 s8, v2, s7
+; GFX1032GISEL-NEXT: v_readlane_b32 s9, v3, s7
+; GFX1032GISEL-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[8:9], v[4:5]
+; GFX1032GISEL-NEXT: s_and_b32 s10, vcc_lo, s6
+; GFX1032GISEL-NEXT: s_bitset0_b32 s6, s7
+; GFX1032GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1032GISEL-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1032GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1032GISEL-NEXT: ; %bb.2:
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1164DAGISEL-LABEL: divergent_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT: s_brev_b32 s1, 1
+; GFX1164DAGISEL-NEXT: s_mov_b32 s0, 0
+; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v4, s0
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v5, s1
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s8
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v3, s8
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: v_cmp_gt_i64_e32 vcc, s[4:5], v[4:5]
+; GFX1164DAGISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
+; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s8
+; GFX1164DAGISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1164DAGISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1164DAGISEL-NEXT: ; %bb.2:
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX1164DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1164GISEL-LABEL: divergent_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT: s_brev_b32 s1, 1
+; GFX1164GISEL-NEXT: s_mov_b32 s0, 0
+; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v4, s0
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v5, s1
+; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s8
+; GFX1164GISEL-NEXT: v_readlane_b32 s5, v3, s8
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_cmp_gt_i64_e32 vcc, s[4:5], v[4:5]
+; GFX1164GISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
+; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s8
+; GFX1164GISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1164GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1164GISEL-NEXT: ; %bb.2:
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX1164GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1132DAGISEL-LABEL: divergent_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT: s_brev_b32 s1, 1
+; GFX1132DAGISEL-NEXT: s_mov_b32 s0, 0
+; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[4:5], v[4:5]
+; GFX1132DAGISEL-NEXT: s_and_b32 s6, vcc_lo, s2
+; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX1132DAGISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1132DAGISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1132DAGISEL-NEXT: ; %bb.2:
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1132GISEL-LABEL: divergent_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT: s_brev_b32 s1, 1
+; GFX1132GISEL-NEXT: s_mov_b32 s0, 0
+; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
+; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[4:5], v[4:5]
+; GFX1132GISEL-NEXT: s_and_b32 s6, vcc_lo, s2
+; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX1132GISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1132GISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1132GISEL-NEXT: ; %bb.2:
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132GISEL-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.max.i64(i64 %id.x, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64 %in2) {
+; GFX8DAGISEL-LABEL: divergent_cfg_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GFX8DAGISEL-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
+; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[6:7], s[6:7]
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX8DAGISEL-NEXT: s_xor_b64 exec, exec, s[6:7]
+; GFX8DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX8DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX8DAGISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: divergent_cfg_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX8GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX8GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX8GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX8GISEL-NEXT: ; %bb.1: ; %else
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX8GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX8GISEL-NEXT: ; %bb.3: ; %if
+; GFX8GISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX8GISEL-NEXT: .LBB9_4: ; %endif
+; GFX8GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: divergent_cfg_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX9DAGISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9DAGISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[4:5], s[4:5]
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT: s_xor_b64 exec, exec, s[4:5]
+; GFX9DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX9DAGISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: divergent_cfg_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX9GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX9GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX9GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX9GISEL-NEXT: ; %bb.1: ; %else
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX9GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX9GISEL-NEXT: ; %bb.3: ; %if
+; GFX9GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], s[6:7]
+; GFX9GISEL-NEXT: .LBB9_4: ; %endif
+; GFX9GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX1064DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_clause 0x1
+; GFX1064DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064DAGISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[4:5], s[4:5]
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT: s_xor_b64 exec, exec, s[4:5]
+; GFX1064DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1064DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1064DAGISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT: s_endpgm
+;
+; GFX1064GISEL-LABEL: divergent_cfg_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX1064GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1064GISEL-NEXT: ; %bb.1: ; %else
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1064GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1064GISEL-NEXT: ; %bb.3: ; %if
+; GFX1064GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], s[6:7]
+; GFX1064GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1064GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT: s_endpgm
+;
+; GFX1032DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_clause 0x1
+; GFX1032DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc_lo, 15, v0
+; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032DAGISEL-NEXT: s_xor_b32 s4, exec_lo, s4
+; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s4, s4
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s4
+; GFX1032DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1032DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1032DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT: s_endpgm
+;
+; GFX1032GISEL-LABEL: divergent_cfg_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 16, v0
+; GFX1032GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1032GISEL-NEXT: s_and_saveexec_b32 s8, vcc_lo
+; GFX1032GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1032GISEL-NEXT: ; %bb.1: ; %else
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1032GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s2, s8
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1032GISEL-NEXT: ; %bb.3: ; %if
+; GFX1032GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b64 s[6:7], s[6:7]
+; GFX1032GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1032GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_clause 0x1
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX1164DAGISEL-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
+; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[6:7], s[6:7]
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: s_xor_b64 exec, exec, s[6:7]
+; GFX1164DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX1164DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1164DAGISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: divergent_cfg_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164GISEL-NEXT: s_mov_b64 s[8:9], exec
+; GFX1164GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
+; GFX1164GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1164GISEL-NEXT: ; %bb.1: ; %else
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1164GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[2:3], s[8:9]
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1164GISEL-NEXT: ; %bb.3: ; %if
+; GFX1164GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX1164GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1164GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_clause 0x1
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX1132DAGISEL-NEXT: s_xor_b32 s6, exec_lo, s6
+; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s6, s6
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s6
+; GFX1132DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX1132DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1132DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s6
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: divergent_cfg_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132GISEL-NEXT: s_mov_b32 s8, exec_lo
+; GFX1132GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
+; GFX1132GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1132GISEL-NEXT: ; %bb.1: ; %else
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1132GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s2, s8
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1132GISEL-NEXT: ; %bb.3: ; %if
+; GFX1132GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX1132GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1132GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %d_cmp = icmp ult i32 %tid, 16
+ br i1 %d_cmp, label %if, label %else
+
+if:
+ %reducedValTid = call i64 @llvm.amdgcn.wave.reduce.max.i64(i64 %in2, i32 1)
+ br label %endif
+
+else:
+ %reducedValIn = call i64 @llvm.amdgcn.wave.reduce.max.i64(i64 %in, i32 1)
+ br label %endif
+
+endif:
+ %combine = phi i64 [%reducedValTid, %if], [%reducedValIn, %else]
+ store i64 %combine, ptr addrspace(1) %out
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
index 7cb0e6533c722..4e0c9ec111cbe 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.min.ll
@@ -980,3 +980,903 @@ endif:
store i32 %combine, ptr addrspace(1) %out
ret void
}
+
+define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: uniform_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: uniform_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: uniform_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: uniform_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: uniform_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX10DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: uniform_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX10GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX10GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: uniform_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: uniform_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: uniform_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: uniform_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.min.i64(i64 %in, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
+; GFX8DAGISEL-LABEL: const_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: const_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: const_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: const_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: const_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: const_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX10GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX10GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: const_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: const_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: const_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: const_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.min.i64(i64 123, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: poison_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: poison_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: poison_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: poison_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: poison_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT: global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: poison_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT: global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX11DAGISEL-LABEL: poison_value_i64:
+; GFX11DAGISEL: ; %bb.0: ; %entry
+; GFX11DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX11DAGISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX11DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11DAGISEL-NEXT: global_store_b64 v0, v[0:1], s[0:1]
+; GFX11DAGISEL-NEXT: s_endpgm
+;
+; GFX11GISEL-LABEL: poison_value_i64:
+; GFX11GISEL: ; %bb.0: ; %entry
+; GFX11GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX11GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX11GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11GISEL-NEXT: global_store_b64 v0, v[0:1], s[0:1]
+; GFX11GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.min.i64(i64 poison, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
+; GFX8DAGISEL-LABEL: divergent_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_brev_b32 s5, -2
+; GFX8DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX8DAGISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX8DAGISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX8DAGISEL-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[4:5]
+; GFX8DAGISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX8DAGISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX8DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX8DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX8DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX8DAGISEL-NEXT: ; %bb.2:
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8GISEL-LABEL: divergent_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: s_brev_b32 s5, -2
+; GFX8GISEL-NEXT: s_mov_b32 s4, -1
+; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX8GISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX8GISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX8GISEL-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[4:5]
+; GFX8GISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX8GISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX8GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX8GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX8GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX8GISEL-NEXT: ; %bb.2:
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9DAGISEL-LABEL: divergent_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT: s_brev_b32 s5, -2
+; GFX9DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX9DAGISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX9DAGISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX9DAGISEL-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[4:5]
+; GFX9DAGISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX9DAGISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX9DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX9DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX9DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX9DAGISEL-NEXT: ; %bb.2:
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9GISEL-LABEL: divergent_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT: s_brev_b32 s5, -2
+; GFX9GISEL-NEXT: s_mov_b32 s4, -1
+; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX9GISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX9GISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX9GISEL-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[4:5]
+; GFX9GISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX9GISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX9GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX9GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX9GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX9GISEL-NEXT: ; %bb.2:
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX9GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1064DAGISEL-LABEL: divergent_value_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT: s_brev_b32 s5, -2
+; GFX1064DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX1064DAGISEL-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[4:5]
+; GFX1064DAGISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX1064DAGISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX1064DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1064DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX1064DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1064DAGISEL-NEXT: ; %bb.2:
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1064GISEL-LABEL: divergent_value_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT: s_brev_b32 s5, -2
+; GFX1064GISEL-NEXT: s_mov_b32 s4, -1
+; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX1064GISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX1064GISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX1064GISEL-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[4:5]
+; GFX1064GISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX1064GISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX1064GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1064GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX1064GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1064GISEL-NEXT: ; %bb.2:
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1064GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032DAGISEL-LABEL: divergent_value_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT: s_brev_b32 s5, -2
+; GFX1032DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s8, v2, s7
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s9, v3, s7
+; GFX1032DAGISEL-NEXT: v_cmp_lt_i64_e32 vcc_lo, s[8:9], v[4:5]
+; GFX1032DAGISEL-NEXT: s_and_b32 s10, vcc_lo, s6
+; GFX1032DAGISEL-NEXT: s_bitset0_b32 s6, s7
+; GFX1032DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1032DAGISEL-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1032DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1032DAGISEL-NEXT: ; %bb.2:
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032GISEL-LABEL: divergent_value_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT: s_brev_b32 s5, -2
+; GFX1032GISEL-NEXT: s_mov_b32 s4, -1
+; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX1032GISEL-NEXT: v_readlane_b32 s8, v2, s7
+; GFX1032GISEL-NEXT: v_readlane_b32 s9, v3, s7
+; GFX1032GISEL-NEXT: v_cmp_lt_i64_e32 vcc_lo, s[8:9], v[4:5]
+; GFX1032GISEL-NEXT: s_and_b32 s10, vcc_lo, s6
+; GFX1032GISEL-NEXT: s_bitset0_b32 s6, s7
+; GFX1032GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1032GISEL-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1032GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1032GISEL-NEXT: ; %bb.2:
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1164DAGISEL-LABEL: divergent_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT: s_brev_b32 s1, -2
+; GFX1164DAGISEL-NEXT: s_mov_b32 s0, -1
+; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v4, s0
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v5, s1
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s8
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v3, s8
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[4:5]
+; GFX1164DAGISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
+; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s8
+; GFX1164DAGISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1164DAGISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1164DAGISEL-NEXT: ; %bb.2:
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX1164DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1164GISEL-LABEL: divergent_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT: s_brev_b32 s1, -2
+; GFX1164GISEL-NEXT: s_mov_b32 s0, -1
+; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v4, s0
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v5, s1
+; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s8
+; GFX1164GISEL-NEXT: v_readlane_b32 s5, v3, s8
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[4:5]
+; GFX1164GISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
+; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s8
+; GFX1164GISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1164GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1164GISEL-NEXT: ; %bb.2:
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX1164GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1132DAGISEL-LABEL: divergent_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT: s_brev_b32 s1, -2
+; GFX1132DAGISEL-NEXT: s_mov_b32 s0, -1
+; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: v_cmp_lt_i64_e32 vcc_lo, s[4:5], v[4:5]
+; GFX1132DAGISEL-NEXT: s_and_b32 s6, vcc_lo, s2
+; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX1132DAGISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1132DAGISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1132DAGISEL-NEXT: ; %bb.2:
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1132GISEL-LABEL: divergent_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT: s_brev_b32 s1, -2
+; GFX1132GISEL-NEXT: s_mov_b32 s0, -1
+; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
+; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_cmp_lt_i64_e32 vcc_lo, s[4:5], v[4:5]
+; GFX1132GISEL-NEXT: s_and_b32 s6, vcc_lo, s2
+; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX1132GISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1132GISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1132GISEL-NEXT: ; %bb.2:
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132GISEL-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.min.i64(i64 %id.x, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64 %in2) {
+; GFX8DAGISEL-LABEL: divergent_cfg_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GFX8DAGISEL-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
+; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[6:7], s[6:7]
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX8DAGISEL-NEXT: s_xor_b64 exec, exec, s[6:7]
+; GFX8DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX8DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX8DAGISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: divergent_cfg_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX8GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX8GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX8GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX8GISEL-NEXT: ; %bb.1: ; %else
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX8GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX8GISEL-NEXT: ; %bb.3: ; %if
+; GFX8GISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX8GISEL-NEXT: .LBB9_4: ; %endif
+; GFX8GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: divergent_cfg_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX9DAGISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9DAGISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[4:5], s[4:5]
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT: s_xor_b64 exec, exec, s[4:5]
+; GFX9DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX9DAGISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: divergent_cfg_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX9GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX9GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX9GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX9GISEL-NEXT: ; %bb.1: ; %else
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX9GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX9GISEL-NEXT: ; %bb.3: ; %if
+; GFX9GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], s[6:7]
+; GFX9GISEL-NEXT: .LBB9_4: ; %endif
+; GFX9GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX1064DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_clause 0x1
+; GFX1064DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064DAGISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[4:5], s[4:5]
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT: s_xor_b64 exec, exec, s[4:5]
+; GFX1064DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1064DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1064DAGISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT: s_endpgm
+;
+; GFX1064GISEL-LABEL: divergent_cfg_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX1064GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1064GISEL-NEXT: ; %bb.1: ; %else
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1064GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1064GISEL-NEXT: ; %bb.3: ; %if
+; GFX1064GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], s[6:7]
+; GFX1064GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1064GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT: s_endpgm
+;
+; GFX1032DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_clause 0x1
+; GFX1032DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc_lo, 15, v0
+; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032DAGISEL-NEXT: s_xor_b32 s4, exec_lo, s4
+; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s4, s4
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s4
+; GFX1032DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1032DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1032DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT: s_endpgm
+;
+; GFX1032GISEL-LABEL: divergent_cfg_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 16, v0
+; GFX1032GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1032GISEL-NEXT: s_and_saveexec_b32 s8, vcc_lo
+; GFX1032GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1032GISEL-NEXT: ; %bb.1: ; %else
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1032GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s2, s8
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1032GISEL-NEXT: ; %bb.3: ; %if
+; GFX1032GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b64 s[6:7], s[6:7]
+; GFX1032GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1032GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_clause 0x1
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX1164DAGISEL-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
+; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[6:7], s[6:7]
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: s_xor_b64 exec, exec, s[6:7]
+; GFX1164DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX1164DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1164DAGISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: divergent_cfg_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164GISEL-NEXT: s_mov_b64 s[8:9], exec
+; GFX1164GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
+; GFX1164GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1164GISEL-NEXT: ; %bb.1: ; %else
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1164GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[2:3], s[8:9]
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1164GISEL-NEXT: ; %bb.3: ; %if
+; GFX1164GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX1164GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1164GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_clause 0x1
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX1132DAGISEL-NEXT: s_xor_b32 s6, exec_lo, s6
+; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s6, s6
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s6
+; GFX1132DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX1132DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1132DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s6
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: divergent_cfg_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132GISEL-NEXT: s_mov_b32 s8, exec_lo
+; GFX1132GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
+; GFX1132GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1132GISEL-NEXT: ; %bb.1: ; %else
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1132GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s2, s8
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1132GISEL-NEXT: ; %bb.3: ; %if
+; GFX1132GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX1132GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1132GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %d_cmp = icmp ult i32 %tid, 16
+ br i1 %d_cmp, label %if, label %else
+
+if:
+ %reducedValTid = call i64 @llvm.amdgcn.wave.reduce.min.i64(i64 %in2, i32 1)
+ br label %endif
+
+else:
+ %reducedValIn = call i64 @llvm.amdgcn.wave.reduce.min.i64(i64 %in, i32 1)
+ br label %endif
+
+endif:
+ %combine = phi i64 [%reducedValTid, %if], [%reducedValIn, %else]
+ store i64 %combine, ptr addrspace(1) %out
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
index ba7a816184cd8..a4a5b01a873b7 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
@@ -947,3 +947,673 @@ endif:
store i32 %combine, ptr addrspace(1) %out
ret void
}
+
+define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: uniform_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: uniform_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: uniform_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: uniform_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: uniform_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX10DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: uniform_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX10GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX10GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: uniform_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: uniform_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: uniform_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: uniform_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.umax.i64(i64 %in, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
+; GFX8DAGISEL-LABEL: const_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: const_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: const_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v1, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: const_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: const_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT: global_store_dwordx2 v1, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: const_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX10GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX10GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: const_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: global_store_b64 v1, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: const_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: const_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, 0x7b
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: global_store_b64 v1, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: const_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.umax.i64(i64 123, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: poison_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: poison_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: poison_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: poison_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: poison_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: poison_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX11DAGISEL-LABEL: poison_value_i64:
+; GFX11DAGISEL: ; %bb.0: ; %entry
+; GFX11DAGISEL-NEXT: s_endpgm
+;
+; GFX11GISEL-LABEL: poison_value_i64:
+; GFX11GISEL: ; %bb.0: ; %entry
+; GFX11GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.umax.i64(i64 poison, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
+; GFX8DAGISEL-LABEL: divergent_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: divergent_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: divergent_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: divergent_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: divergent_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX10DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: divergent_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX10GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX10GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: divergent_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: divergent_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: divergent_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: divergent_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.umax.i64(i64 %id.x, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64 %in2) {
+; GFX8DAGISEL-LABEL: divergent_cfg_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GFX8DAGISEL-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
+; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[6:7], s[6:7]
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX8DAGISEL-NEXT: s_xor_b64 exec, exec, s[6:7]
+; GFX8DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX8DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX8DAGISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: divergent_cfg_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX8GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX8GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX8GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX8GISEL-NEXT: ; %bb.1: ; %else
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX8GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX8GISEL-NEXT: ; %bb.3: ; %if
+; GFX8GISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX8GISEL-NEXT: .LBB9_4: ; %endif
+; GFX8GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: divergent_cfg_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX9DAGISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9DAGISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[4:5], s[4:5]
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT: s_xor_b64 exec, exec, s[4:5]
+; GFX9DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX9DAGISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: divergent_cfg_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX9GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX9GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX9GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX9GISEL-NEXT: ; %bb.1: ; %else
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX9GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX9GISEL-NEXT: ; %bb.3: ; %if
+; GFX9GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], s[6:7]
+; GFX9GISEL-NEXT: .LBB9_4: ; %endif
+; GFX9GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX1064DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_clause 0x1
+; GFX1064DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064DAGISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[4:5], s[4:5]
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT: s_xor_b64 exec, exec, s[4:5]
+; GFX1064DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1064DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1064DAGISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT: s_endpgm
+;
+; GFX1064GISEL-LABEL: divergent_cfg_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX1064GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1064GISEL-NEXT: ; %bb.1: ; %else
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1064GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1064GISEL-NEXT: ; %bb.3: ; %if
+; GFX1064GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], s[6:7]
+; GFX1064GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1064GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT: s_endpgm
+;
+; GFX1032DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_clause 0x1
+; GFX1032DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc_lo, 15, v0
+; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032DAGISEL-NEXT: s_xor_b32 s4, exec_lo, s4
+; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s4, s4
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s4
+; GFX1032DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1032DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1032DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT: s_endpgm
+;
+; GFX1032GISEL-LABEL: divergent_cfg_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 16, v0
+; GFX1032GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1032GISEL-NEXT: s_and_saveexec_b32 s8, vcc_lo
+; GFX1032GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1032GISEL-NEXT: ; %bb.1: ; %else
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1032GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s2, s8
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1032GISEL-NEXT: ; %bb.3: ; %if
+; GFX1032GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b64 s[6:7], s[6:7]
+; GFX1032GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1032GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_clause 0x1
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX1164DAGISEL-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
+; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[6:7], s[6:7]
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: s_xor_b64 exec, exec, s[6:7]
+; GFX1164DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX1164DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1164DAGISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: divergent_cfg_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164GISEL-NEXT: s_mov_b64 s[8:9], exec
+; GFX1164GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
+; GFX1164GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1164GISEL-NEXT: ; %bb.1: ; %else
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1164GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[2:3], s[8:9]
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1164GISEL-NEXT: ; %bb.3: ; %if
+; GFX1164GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX1164GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1164GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_clause 0x1
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX1132DAGISEL-NEXT: s_xor_b32 s6, exec_lo, s6
+; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s6, s6
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s6
+; GFX1132DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX1132DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1132DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s6
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: divergent_cfg_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132GISEL-NEXT: s_mov_b32 s8, exec_lo
+; GFX1132GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
+; GFX1132GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1132GISEL-NEXT: ; %bb.1: ; %else
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1132GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s2, s8
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1132GISEL-NEXT: ; %bb.3: ; %if
+; GFX1132GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX1132GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1132GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %d_cmp = icmp ult i32 %tid, 16
+ br i1 %d_cmp, label %if, label %else
+
+if:
+ %reducedValTid = call i64 @llvm.amdgcn.wave.reduce.umax.i64(i64 %in2, i32 1)
+ br label %endif
+
+else:
+ %reducedValIn = call i64 @llvm.amdgcn.wave.reduce.umax.i64(i64 %in, i32 1)
+ br label %endif
+
+endif:
+ %combine = phi i64 [%reducedValTid, %if], [%reducedValIn, %else]
+ store i64 %combine, ptr addrspace(1) %out
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
index 3eaa89c957474..29a78855d6629 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
@@ -947,3 +947,865 @@ endif:
store i32 %combine, ptr addrspace(1) %out
ret void
}
+
+define amdgpu_kernel void @uniform_value_i64(ptr addrspace(1) %out, i64 %in) {
+; GFX8DAGISEL-LABEL: uniform_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: uniform_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: uniform_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: uniform_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: uniform_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX10DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: uniform_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX10GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX10GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: uniform_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: uniform_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: uniform_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: uniform_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.umin.i64(i64 %in, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @const_value_i64(ptr addrspace(1) %out) {
+; GFX8DAGISEL-LABEL: const_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: const_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: const_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v1, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: const_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: const_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT: global_store_dwordx2 v1, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: const_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX10GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX10GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: const_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: global_store_b64 v1, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: const_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: const_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, 0x7b
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: global_store_b64 v1, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: const_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.umin.i64(i64 123, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @poison_value_i64(ptr addrspace(1) %out) {
+; GFX8DAGISEL-LABEL: poison_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: poison_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: poison_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: poison_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: poison_value_i64:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: poison_value_i64:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX11DAGISEL-LABEL: poison_value_i64:
+; GFX11DAGISEL: ; %bb.0: ; %entry
+; GFX11DAGISEL-NEXT: s_endpgm
+;
+; GFX11GISEL-LABEL: poison_value_i64:
+; GFX11GISEL: ; %bb.0: ; %entry
+; GFX11GISEL-NEXT: s_endpgm
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.umin.i64(i64 poison, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define void @divergent_value_i64(ptr addrspace(1) %out, i64 %id.x) {
+; GFX8DAGISEL-LABEL: divergent_value_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX8DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX8DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX8DAGISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX8DAGISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX8DAGISEL-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX8DAGISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX8DAGISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX8DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX8DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX8DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX8DAGISEL-NEXT: ; %bb.2:
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8GISEL-LABEL: divergent_value_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b32 s4, -1
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX8GISEL-NEXT: s_mov_b32 s5, s4
+; GFX8GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX8GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX8GISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX8GISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX8GISEL-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX8GISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX8GISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX8GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX8GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX8GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX8GISEL-NEXT: ; %bb.2:
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9DAGISEL-LABEL: divergent_value_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX9DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX9DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX9DAGISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX9DAGISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX9DAGISEL-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX9DAGISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX9DAGISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX9DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX9DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX9DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX9DAGISEL-NEXT: ; %bb.2:
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9GISEL-LABEL: divergent_value_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b32 s4, -1
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX9GISEL-NEXT: s_mov_b32 s5, s4
+; GFX9GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX9GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX9GISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX9GISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX9GISEL-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX9GISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX9GISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX9GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX9GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX9GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX9GISEL-NEXT: ; %bb.2:
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX9GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX9GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1064DAGISEL-LABEL: divergent_value_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX1064DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX1064DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX1064DAGISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX1064DAGISEL-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX1064DAGISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX1064DAGISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX1064DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1064DAGISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX1064DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1064DAGISEL-NEXT: ; %bb.2:
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1064GISEL-LABEL: divergent_value_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b32 s4, -1
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1064GISEL-NEXT: s_mov_b32 s5, s4
+; GFX1064GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1064GISEL-NEXT: s_ff1_i32_b64 s12, s[6:7]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX1064GISEL-NEXT: v_readlane_b32 s8, v2, s12
+; GFX1064GISEL-NEXT: v_readlane_b32 s9, v3, s12
+; GFX1064GISEL-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX1064GISEL-NEXT: s_and_b64 s[10:11], vcc, s[6:7]
+; GFX1064GISEL-NEXT: s_bitset0_b64 s[6:7], s12
+; GFX1064GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1064GISEL-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX1064GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1064GISEL-NEXT: ; %bb.2:
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1064GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1064GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032DAGISEL-LABEL: divergent_value_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: s_mov_b32 s4, -1
+; GFX1032DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032DAGISEL-NEXT: s_mov_b32 s5, s4
+; GFX1032DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s7, s6
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s8, v2, s7
+; GFX1032DAGISEL-NEXT: v_readlane_b32 s9, v3, s7
+; GFX1032DAGISEL-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[8:9], v[4:5]
+; GFX1032DAGISEL-NEXT: s_and_b32 s10, vcc_lo, s6
+; GFX1032DAGISEL-NEXT: s_bitset0_b32 s6, s7
+; GFX1032DAGISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1032DAGISEL-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1032DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1032DAGISEL-NEXT: ; %bb.2:
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1032GISEL-LABEL: divergent_value_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b32 s4, -1
+; GFX1032GISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1032GISEL-NEXT: s_mov_b32 s5, s4
+; GFX1032GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1032GISEL-NEXT: s_ff1_i32_b32 s7, s6
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v4, s4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v5, s5
+; GFX1032GISEL-NEXT: v_readlane_b32 s8, v2, s7
+; GFX1032GISEL-NEXT: v_readlane_b32 s9, v3, s7
+; GFX1032GISEL-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[8:9], v[4:5]
+; GFX1032GISEL-NEXT: s_and_b32 s10, vcc_lo, s6
+; GFX1032GISEL-NEXT: s_bitset0_b32 s6, s7
+; GFX1032GISEL-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; GFX1032GISEL-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1032GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1032GISEL-NEXT: ; %bb.2:
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032GISEL-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX1032GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1164DAGISEL-LABEL: divergent_value_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: s_mov_b32 s0, -1
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164DAGISEL-NEXT: s_mov_b32 s1, s0
+; GFX1164DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v4, s0
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v5, s1
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s4, v2, s8
+; GFX1164DAGISEL-NEXT: v_readlane_b32 s5, v3, s8
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164DAGISEL-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[4:5]
+; GFX1164DAGISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
+; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s8
+; GFX1164DAGISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1164DAGISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1164DAGISEL-NEXT: ; %bb.2:
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX1164DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1164GISEL-LABEL: divergent_value_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b32 s0, -1
+; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164GISEL-NEXT: s_mov_b32 s1, s0
+; GFX1164GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1164GISEL-NEXT: s_ctz_i32_b64 s8, s[2:3]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v4, s0
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v5, s1
+; GFX1164GISEL-NEXT: v_readlane_b32 s4, v2, s8
+; GFX1164GISEL-NEXT: v_readlane_b32 s5, v3, s8
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[4:5]
+; GFX1164GISEL-NEXT: s_and_b64 s[6:7], vcc, s[2:3]
+; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s8
+; GFX1164GISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1164GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1164GISEL-NEXT: ; %bb.2:
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX1164GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1164GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1132DAGISEL-LABEL: divergent_value_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: s_mov_b32 s0, -1
+; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132DAGISEL-NEXT: s_mov_b32 s1, s0
+; GFX1132DAGISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132DAGISEL-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[4:5], v[4:5]
+; GFX1132DAGISEL-NEXT: s_and_b32 s6, vcc_lo, s2
+; GFX1132DAGISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX1132DAGISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1132DAGISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132DAGISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1132DAGISEL-NEXT: ; %bb.2:
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132DAGISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1132GISEL-LABEL: divergent_value_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b32 s0, -1
+; GFX1132GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132GISEL-NEXT: s_mov_b32 s1, s0
+; GFX1132GISEL-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1132GISEL-NEXT: s_ctz_i32_b32 s3, s2
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
+; GFX1132GISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX1132GISEL-NEXT: v_readlane_b32 s5, v3, s3
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[4:5], v[4:5]
+; GFX1132GISEL-NEXT: s_and_b32 s6, vcc_lo, s2
+; GFX1132GISEL-NEXT: s_bitset0_b32 s2, s3
+; GFX1132GISEL-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
+; GFX1132GISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132GISEL-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX1132GISEL-NEXT: ; %bb.2:
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX1132GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX1132GISEL-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %result = call i64 @llvm.amdgcn.wave.reduce.umin.i64(i64 %id.x, i32 1)
+ store i64 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @divergent_cfg_i64(ptr addrspace(1) %out, i64 %in, i64 %in2) {
+; GFX8DAGISEL-LABEL: divergent_cfg_i64:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GFX8DAGISEL-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
+; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[6:7], s[6:7]
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX8DAGISEL-NEXT: s_xor_b64 exec, exec, s[6:7]
+; GFX8DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX8DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX8DAGISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: divergent_cfg_i64:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX8GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX8GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX8GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX8GISEL-NEXT: ; %bb.1: ; %else
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX8GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX8GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX8GISEL-NEXT: ; %bb.3: ; %if
+; GFX8GISEL-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX8GISEL-NEXT: .LBB9_4: ; %endif
+; GFX8GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: divergent_cfg_i64:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX9DAGISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9DAGISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[4:5], s[4:5]
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT: s_xor_b64 exec, exec, s[4:5]
+; GFX9DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX9DAGISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: divergent_cfg_i64:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX9GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX9GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX9GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX9GISEL-NEXT: ; %bb.1: ; %else
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX9GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX9GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX9GISEL-NEXT: ; %bb.3: ; %if
+; GFX9GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: s_mov_b64 s[6:7], s[6:7]
+; GFX9GISEL-NEXT: .LBB9_4: ; %endif
+; GFX9GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX1064DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1064DAGISEL: ; %bb.0: ; %entry
+; GFX1064DAGISEL-NEXT: s_clause 0x1
+; GFX1064DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0
+; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064DAGISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[4:5], s[4:5]
+; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1064DAGISEL-NEXT: s_xor_b64 exec, exec, s[4:5]
+; GFX1064DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1064DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1064DAGISEL-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064DAGISEL-NEXT: s_endpgm
+;
+; GFX1064GISEL-LABEL: divergent_cfg_i64:
+; GFX1064GISEL: ; %bb.0: ; %entry
+; GFX1064GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0
+; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[8:9], vcc
+; GFX1064GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1064GISEL-NEXT: ; %bb.1: ; %else
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1064GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[8:9]
+; GFX1064GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1064GISEL-NEXT: ; %bb.3: ; %if
+; GFX1064GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064GISEL-NEXT: s_mov_b64 s[6:7], s[6:7]
+; GFX1064GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1064GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1064GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1064GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1064GISEL-NEXT: s_endpgm
+;
+; GFX1032DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1032DAGISEL: ; %bb.0: ; %entry
+; GFX1032DAGISEL-NEXT: s_clause 0x1
+; GFX1032DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc_lo, 15, v0
+; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032DAGISEL-NEXT: s_xor_b32 s4, exec_lo, s4
+; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s4, s4
+; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1032DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s4
+; GFX1032DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1032DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1032DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032DAGISEL-NEXT: s_endpgm
+;
+; GFX1032GISEL-LABEL: divergent_cfg_i64:
+; GFX1032GISEL: ; %bb.0: ; %entry
+; GFX1032GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 16, v0
+; GFX1032GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1032GISEL-NEXT: s_and_saveexec_b32 s8, vcc_lo
+; GFX1032GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1032GISEL-NEXT: ; %bb.1: ; %else
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1032GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s2, s8
+; GFX1032GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1032GISEL-NEXT: ; %bb.3: ; %if
+; GFX1032GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032GISEL-NEXT: s_mov_b64 s[6:7], s[6:7]
+; GFX1032GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1032GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1032GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1032GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1032GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_clause 0x1
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164DAGISEL-NEXT: s_mov_b64 s[6:7], exec
+; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX1164DAGISEL-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
+; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[6:7], s[6:7]
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: s_xor_b64 exec, exec, s[6:7]
+; GFX1164DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX1164DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1164DAGISEL-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: divergent_cfg_i64:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1164GISEL-NEXT: s_mov_b64 s[8:9], exec
+; GFX1164GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
+; GFX1164GISEL-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1164GISEL-NEXT: ; %bb.1: ; %else
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1164GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[2:3], s[8:9]
+; GFX1164GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1164GISEL-NEXT: ; %bb.3: ; %if
+; GFX1164GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX1164GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1164GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: divergent_cfg_i64:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_clause 0x1
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132DAGISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132DAGISEL-NEXT: s_mov_b32 s6, exec_lo
+; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
+; GFX1132DAGISEL-NEXT: s_xor_b32 s6, exec_lo, s6
+; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s6, s6
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s6
+; GFX1132DAGISEL-NEXT: ; %bb.1: ; %if
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX1132DAGISEL-NEXT: ; %bb.2: ; %endif
+; GFX1132DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s6
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: divergent_cfg_i64:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1132GISEL-NEXT: s_mov_b32 s8, exec_lo
+; GFX1132GISEL-NEXT: ; implicit-def: $sgpr6_sgpr7
+; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
+; GFX1132GISEL-NEXT: s_xor_b32 s8, exec_lo, s8
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB9_2
+; GFX1132GISEL-NEXT: ; %bb.1: ; %else
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1132GISEL-NEXT: .LBB9_2: ; %Flow
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s2, s8
+; GFX1132GISEL-NEXT: s_cbranch_execz .LBB9_4
+; GFX1132GISEL-NEXT: ; %bb.3: ; %if
+; GFX1132GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX1132GISEL-NEXT: .LBB9_4: ; %endif
+; GFX1132GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+entry:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %d_cmp = icmp ult i32 %tid, 16
+ br i1 %d_cmp, label %if, label %else
+
+if:
+ %reducedValTid = call i64 @llvm.amdgcn.wave.reduce.umin.i64(i64 %in2, i32 1)
+ br label %endif
+
+else:
+ %reducedValIn = call i64 @llvm.amdgcn.wave.reduce.umin.i64(i64 %in, i32 1)
+ br label %endif
+
+endif:
+ %combine = phi i64 [%reducedValTid, %if], [%reducedValIn, %else]
+ store i64 %combine, ptr addrspace(1) %out
+ ret void
+}