[llvm-branch-commits] [llvm] [AMDGPU] Add wave reduce intrinsics for float types - 2 (PR #161815)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri Oct 3 02:50:45 PDT 2025
https://github.com/easyonaadit created https://github.com/llvm/llvm-project/pull/161815
Supported Ops: `fadd`, `fsub`
>From 9165bcabdf9cab8a204bd8fbbf18e1801de58572 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Mon, 29 Sep 2025 18:58:10 +0530
Subject: [PATCH] [AMDGPU] Add wave reduce intrinsics for float types - 2
Supported Ops: `fadd`, `fsub`
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 54 +++++++++++++++++++++--
llvm/lib/Target/AMDGPU/SIInstructions.td | 2 +
2 files changed, 53 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 3799581664081..0423803833e03 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5330,11 +5330,14 @@ static uint32_t getIdentityValueFor32BitWaveReduction(unsigned Opc) {
case AMDGPU::S_MAX_U32:
return std::numeric_limits<uint32_t>::min();
case AMDGPU::S_MAX_I32:
+ case AMDGPU::V_SUB_F32_e64: // INT32_MIN bit pattern 0x80000000 == -0.0f
return std::numeric_limits<int32_t>::min();
case AMDGPU::S_ADD_I32:
case AMDGPU::S_SUB_I32:
case AMDGPU::S_OR_B32:
case AMDGPU::S_XOR_B32:
+ case AMDGPU::V_ADD_F32_e64: // 0x00000000 == +0.0f
+ // NOTE(review): the strict IEEE-754 additive identity is -0.0 (x + -0.0 == x for all x); the +0.0/-0.0 placement of the fadd/fsub cases looks swapped -- confirm intended identities.
return std::numeric_limits<uint32_t>::min();
case AMDGPU::S_AND_B32:
return std::numeric_limits<uint32_t>::max();
@@ -5346,6 +5349,13 @@ static uint32_t getIdentityValueFor32BitWaveReduction(unsigned Opc) {
std::memcpy(&bits, &nanf, sizeof(bits));
return bits;
}
+ // NOTE(review): removed a commented-out draft for a separate V_SUB_F32_e64
+ // identity; it referenced std::numeric_limits<float>::zero(), which does not
+ // exist, and the V_SUB_F32_e64 case is already handled above.
default:
llvm_unreachable(
"Unexpected opcode in getIdentityValueFor32BitWaveReduction");
@@ -5381,11 +5391,13 @@ static bool is32bitWaveReduceOperation(unsigned Opc) {
Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32 ||
Opc == AMDGPU::S_AND_B32 || Opc == AMDGPU::S_OR_B32 ||
Opc == AMDGPU::S_XOR_B32 || Opc == AMDGPU::V_MIN_F32_e64 ||
- Opc == AMDGPU::V_MAX_F32_e64;
+ Opc == AMDGPU::V_MAX_F32_e64 || Opc == AMDGPU::V_ADD_F32_e64 ||
+ Opc == AMDGPU::V_SUB_F32_e64;
}
static bool isFloatingPointWaveReduceOperation(unsigned Opc) {
- return Opc == AMDGPU::V_MIN_F32_e64 || Opc == AMDGPU::V_MAX_F32_e64;
+ return Opc == AMDGPU::V_MIN_F32_e64 || Opc == AMDGPU::V_MAX_F32_e64 ||
+ Opc == AMDGPU::V_ADD_F32_e64 || Opc == AMDGPU::V_SUB_F32_e64;
}
static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
@@ -5432,8 +5444,10 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
case AMDGPU::S_XOR_B64:
case AMDGPU::S_ADD_I32:
case AMDGPU::S_ADD_U64_PSEUDO:
+ case AMDGPU::V_ADD_F32_e64:
case AMDGPU::S_SUB_I32:
- case AMDGPU::S_SUB_U64_PSEUDO: {
+ case AMDGPU::S_SUB_U64_PSEUDO:
+ case AMDGPU::V_SUB_F32_e64: {
const TargetRegisterClass *WaveMaskRegClass = TRI->getWaveMaskRegClass();
const TargetRegisterClass *DstRegClass = MRI.getRegClass(DstReg);
Register ExecMask = MRI.createVirtualRegister(WaveMaskRegClass);
@@ -5588,6 +5602,36 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
.addImm(AMDGPU::sub1);
break;
}
+ case AMDGPU::V_ADD_F32_e64:
+ case AMDGPU::V_SUB_F32_e64: {
+ // For the FP reductions the active-lane count is an integer while the
+ // source is a float: convert the count to float and multiply. V_MUL_F32 is
+ // a VALU instruction, so the intermediates must live in VGPRs.
+ // VOP3 source-modifier bits: 1 (0b01) = neg, 2 (0b10) = abs, 3 (0b11) = neg|abs.
+ // Convert the number of active lanes (computed into NewAccumulator above)
+ // to float, into a VGPR.
+ Register ActiveLanesVreg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ Register DstVreg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::V_CVT_F32_I32_e64), ActiveLanesVreg)
+ // (input is the active-lane count from NewAccumulator, not SrcReg)
+ .addReg(NewAccumulator->getOperand(0).getReg())
+ .addImm(0) // clamp
+ .addImm(0); // output-modifier
+
+ // Multiply numactivelanes * src
+ // Take negation of input for SUB reduction
+ unsigned srcMod = Opc == AMDGPU::V_SUB_F32_e64 ? 1 : 0; // src0-modifier bit 0 = neg: negate the source for the SUB reduction
+ BuildMI(BB, MI, DL, TII->get(AMDGPU::V_MUL_F32_e64), DstVreg)
+ .addImm(srcMod) // src0 modifier
+ .addReg(SrcReg)
+ .addImm(0) // src1 modifier
+ .addReg(ActiveLanesVreg)
+ .addImm(0) // clamp
+ .addImm(0); // output-mod
+ BuildMI(BB, MI, DL,
+ TII->get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
+ .addReg(DstVreg);
+ }
}
RetBB = &BB;
}
@@ -5832,10 +5876,14 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_ADD_I32);
case AMDGPU::WAVE_REDUCE_ADD_PSEUDO_U64:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_ADD_U64_PSEUDO);
+ case AMDGPU::WAVE_REDUCE_ADD_PSEUDO_F32:
+ return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_ADD_F32_e64);
case AMDGPU::WAVE_REDUCE_SUB_PSEUDO_I32:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_SUB_I32);
case AMDGPU::WAVE_REDUCE_SUB_PSEUDO_U64:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_SUB_U64_PSEUDO);
+ case AMDGPU::WAVE_REDUCE_SUB_PSEUDO_F32:
+ return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_SUB_F32_e64);
case AMDGPU::WAVE_REDUCE_AND_PSEUDO_B32:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_AND_B32);
case AMDGPU::WAVE_REDUCE_AND_PSEUDO_B64:
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index b0ecf2b56aaef..f3e5ae3179cb9 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -375,6 +375,8 @@ defvar Operations = [
WaveReduceOp<"min", "F32", f32, SGPR_32, VSrc_b32>,
WaveReduceOp<"max", "F32", f32, SGPR_32, VSrc_b32>,
+ WaveReduceOp<"add", "F32", f32, SGPR_32, VSrc_b32>,
+ WaveReduceOp<"sub", "F32", f32, SGPR_32, VSrc_b32>,
];
foreach Op = Operations in {
More information about the llvm-branch-commits
mailing list