[llvm] [AMDGPU] Eliminate unnecessary packing in wider f16 vectors for sdwa/opsel-able instruction (PR #137137)
Pierre van Houtryve via llvm-commits
llvm-commits at lists.llvm.org
Fri Apr 25 04:21:50 PDT 2025
================
@@ -1361,6 +1379,499 @@ bool SIPeepholeSDWALegacy::runOnMachineFunction(MachineFunction &MF) {
return SIPeepholeSDWA().run(MF);
}
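+// Returns true if MI is an FP16 instruction that operates on 16-bit sources
+// and destination, making it a candidate for the SDWA/op_sel based rewriting
+// below.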
+static bool isSrcDestFP16Bits(MachineInstr *MI, const SIInstrInfo *TII) {
+ unsigned Opcode = MI->getOpcode();
+ if (TII->isSDWA(Opcode))
+ Opcode = AMDGPU::getBasicFromSDWAOp(Opcode);
+
+ switch (Opcode) {
+ case AMDGPU::V_CVT_F16_U16_e32:
+ case AMDGPU::V_CVT_F16_U16_e64:
+ case AMDGPU::V_CVT_F16_I16_e32:
+ case AMDGPU::V_CVT_F16_I16_e64:
+ case AMDGPU::V_RCP_F16_e64:
+ case AMDGPU::V_RCP_F16_e32:
+ case AMDGPU::V_RSQ_F16_e64:
+ case AMDGPU::V_RSQ_F16_e32:
+ case AMDGPU::V_SQRT_F16_e64:
+ case AMDGPU::V_SQRT_F16_e32:
+ case AMDGPU::V_LOG_F16_e64:
+ case AMDGPU::V_LOG_F16_e32:
+ case AMDGPU::V_EXP_F16_e64:
+ case AMDGPU::V_EXP_F16_e32:
+ case AMDGPU::V_SIN_F16_e64:
+ case AMDGPU::V_SIN_F16_e32:
+ case AMDGPU::V_COS_F16_e64:
+ case AMDGPU::V_COS_F16_e32:
+ case AMDGPU::V_FLOOR_F16_e64:
+ case AMDGPU::V_FLOOR_F16_e32:
+ case AMDGPU::V_CEIL_F16_e64:
+ case AMDGPU::V_CEIL_F16_e32:
+ case AMDGPU::V_TRUNC_F16_e64:
+ case AMDGPU::V_TRUNC_F16_e32:
+ case AMDGPU::V_RNDNE_F16_e64:
+ case AMDGPU::V_RNDNE_F16_e32:
+ case AMDGPU::V_FRACT_F16_e64:
+ case AMDGPU::V_FRACT_F16_e32:
+ case AMDGPU::V_FREXP_MANT_F16_e64:
+ case AMDGPU::V_FREXP_MANT_F16_e32:
+ case AMDGPU::V_FREXP_EXP_I16_F16_e64:
+ case AMDGPU::V_FREXP_EXP_I16_F16_e32:
+ case AMDGPU::V_LDEXP_F16_e64:
+ case AMDGPU::V_LDEXP_F16_e32:
+ case AMDGPU::V_ADD_F16_e64:
+ case AMDGPU::V_ADD_F16_e32:
+ case AMDGPU::V_SUB_F16_e64:
+ case AMDGPU::V_SUB_F16_e32:
+ case AMDGPU::V_SUBREV_F16_e64:
+ case AMDGPU::V_SUBREV_F16_e32:
+ case AMDGPU::V_MUL_F16_e64:
+ case AMDGPU::V_MUL_F16_e32:
+ case AMDGPU::V_MAX_F16_e64:
+ case AMDGPU::V_MAX_F16_e32:
+ case AMDGPU::V_MIN_F16_e64:
+ case AMDGPU::V_MIN_F16_e32:
+ case AMDGPU::V_MAD_F16_e64:
+ case AMDGPU::V_FMA_F16_e64:
+ case AMDGPU::V_DIV_FIXUP_F16_e64:
+ return true;
+ case AMDGPU::V_MADAK_F16:
+ case AMDGPU::V_MADMK_F16:
+ case AMDGPU::V_FMAMK_F16:
+ case AMDGPU::V_FMAAK_F16:
+    // NOTE: These take a literal constant operand; it is unclear whether
+    // they can be handled safely here, so reject them conservatively.
+    return false;
+ case AMDGPU::V_FMAC_F16_e32:
+ case AMDGPU::V_FMAC_F16_e64:
+ case AMDGPU::V_MAC_F16_e32:
+ case AMDGPU::V_MAC_F16_e64:
+    // Their SDWA versions only allow dst_sel to be DWORD, so these cannot
+    // write a single half of the destination.
+ default:
+ return false;
+ }
+}
+
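+// Returns true if Def1MI reads the upper half (WORD_1) of SrcRootReg through
+// SDWA while Def0MI reads the lower half (WORD_0), i.e. the two chains
+// consume the two halves of the packed source as expected.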
+static bool checkForRightSrcRootAccess(MachineInstr *Def0MI,
+ MachineInstr *Def1MI,
+ Register SrcRootReg,
+ const SIInstrInfo *TII) {
+  // If it could, Def1MI would have been SDWA-ed in order to access the upper
+  // half; Def0MI should not be SDWA, since it accesses the lower half.
+ if (!TII->isSDWA(Def1MI->getOpcode()) || TII->isSDWA(Def0MI->getOpcode()))
+ return false;
+
+  // Def1 should write the entire DWORD of its dst, with the unused part
+  // zero-padded.
+ MachineOperand *Def1DstSel =
+ TII->getNamedOperand(*Def1MI, AMDGPU::OpName::dst_sel);
+ if (!Def1DstSel || Def1DstSel->getImm() != AMDGPU::SDWA::SdwaSel::DWORD)
+ return false;
+ MachineOperand *Def1DstUnused =
+ TII->getNamedOperand(*Def1MI, AMDGPU::OpName::dst_unused);
+ if (!Def1DstUnused ||
+ Def1DstUnused->getImm() != AMDGPU::SDWA::DstUnused::UNUSED_PAD)
+ return false;
+
+ MachineOperand *Def1Src0 =
+ TII->getNamedOperand(*Def1MI, AMDGPU::OpName::src0);
+ MachineOperand *Def1Src1 =
+ TII->getNamedOperand(*Def1MI, AMDGPU::OpName::src1);
+ MachineOperand *Def0Src0 =
+ TII->getNamedOperand(*Def0MI, AMDGPU::OpName::src0);
+ MachineOperand *Def0Src1 =
+ TII->getNamedOperand(*Def0MI, AMDGPU::OpName::src1);
+
+ auto chkForDef0MIAccess = [&]() -> bool {
+ if (Def0Src0 && Def0Src0->isReg() && (Def0Src0->getReg() == SrcRootReg)) {
+ MachineOperand *Def0Src0Sel =
+ TII->getNamedOperand(*Def0MI, AMDGPU::OpName::src0_sel);
+      if (!Def0Src0Sel ||
+          Def0Src0Sel->getImm() == AMDGPU::SDWA::SdwaSel::WORD_0)
+        return true;
+ }
+
+ if (Def0Src1 && Def0Src1->isReg() && (Def0Src1->getReg() == SrcRootReg)) {
+ MachineOperand *Def0Src1Sel =
+ TII->getNamedOperand(*Def0MI, AMDGPU::OpName::src1_sel);
+      if (!Def0Src1Sel ||
+          Def0Src1Sel->getImm() == AMDGPU::SDWA::SdwaSel::WORD_0)
+        return true;
+ }
+
+ return false;
+ };
+
+ if (Def1Src0 && Def1Src0->isReg() && (Def1Src0->getReg() == SrcRootReg)) {
+ MachineOperand *Def1Src0Sel =
+ TII->getNamedOperand(*Def1MI, AMDGPU::OpName::src0_sel);
+ if (!Def1Src0Sel ||
+ (Def1Src0Sel->getImm() != AMDGPU::SDWA::SdwaSel::WORD_1))
+ return false;
+
+ if (chkForDef0MIAccess())
+ return true;
+ }
+
+ if (Def1Src1 && Def1Src1->isReg() && (Def1Src1->getReg() == SrcRootReg)) {
+ MachineOperand *Def1Src1Sel =
+ TII->getNamedOperand(*Def1MI, AMDGPU::OpName::src1_sel);
+ if (!Def1Src1Sel ||
+ (Def1Src1Sel->getImm() != AMDGPU::SDWA::SdwaSel::WORD_1))
+ return false;
+
+ if (chkForDef0MIAccess())
+ return true;
+ }
+
+ return false;
+}
+
+/// Given A and B are in the same MBB, returns true if A comes before B.
+static bool dominates(MachineBasicBlock::const_iterator A,
+ MachineBasicBlock::const_iterator B) {
+ assert(A->getParent() == B->getParent());
+ const MachineBasicBlock *MBB = A->getParent();
+ auto MBBEnd = MBB->end();
+ if (B == MBBEnd)
+ return true;
+
+ MachineBasicBlock::const_iterator I = MBB->begin();
+ for (; &*I != A && &*I != B; ++I)
+ ;
+
+ return &*I == A;
+}
+
+// Converts MI into its SDWA version with dst_sel and the sel of the matching
+// source operand set to OpSel, preserving the rest of the dst's bits.
+void SIPeepholeSDWA::convertMIToSDWAWithOpsel(MachineInstr &MI,
+ MachineOperand &SrcMO,
+ AMDGPU::SDWA::SdwaSel OpSel) {
+ LLVM_DEBUG(dbgs() << "Convert instruction:" << MI);
+
+ MachineInstr *SDWAInst;
+ if (TII->isSDWA(MI.getOpcode())) {
+ SDWAInst = &MI;
+ } else {
+ SDWAInst = createSDWAVersion(MI);
+ MI.eraseFromParent();
+ }
+
+ ConvertedInstructions.push_back(SDWAInst);
+ unsigned SDWAOpcode = SDWAInst->getOpcode();
+ ++NumSDWAInstructionsToEliminateFP16Pack;
+
+ MachineOperand *Dst = TII->getNamedOperand(*SDWAInst, AMDGPU::OpName::vdst);
+ assert(Dst && AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::vdst));
+
+ MachineOperand *DstSel =
+ TII->getNamedOperand(*SDWAInst, AMDGPU::OpName::dst_sel);
+ assert(DstSel &&
+ AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::dst_sel));
+ DstSel->setImm(OpSel);
+
+ MachineOperand *DstUnused =
+ TII->getNamedOperand(*SDWAInst, AMDGPU::OpName::dst_unused);
+ assert(DstUnused &&
+ AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::dst_unused));
+  assert(DstUnused->getImm() != AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE &&
+         "dst_unused should not already be UNUSED_PRESERVE");
+  DstUnused->setImm(AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE);
+
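+  // UNUSED_PRESERVE keeps the untouched bits of the destination, so add an
+  // implicit use of the preserved register and tie it to vdst to model that
+  // dependency.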
+ auto PreserveDstIdx =
+ AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::vdst);
+ assert(PreserveDstIdx != -1);
+  auto NewSrcImplicitMO =
+      MachineOperand::CreateReg(SrcMO.getReg(), false, true);
+  copyRegOperand(NewSrcImplicitMO, SrcMO);
+  SDWAInst->addOperand(NewSrcImplicitMO);
+ SDWAInst->tieOperands(PreserveDstIdx, SDWAInst->getNumOperands() - 1);
+
+ MachineOperand *Src0 = TII->getNamedOperand(*SDWAInst, AMDGPU::OpName::src0);
+ assert(Src0 && AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::src0));
+ if (Src0->isReg() && (Src0->getReg() == SrcMO.getReg())) {
+ MachineOperand *Src0Sel =
+ TII->getNamedOperand(*SDWAInst, AMDGPU::OpName::src0_sel);
+ assert(Src0Sel &&
+ AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::src0_sel));
+ Src0Sel->setImm(OpSel);
+
+ LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
+ return;
+ }
+
+ MachineOperand *Src1 = TII->getNamedOperand(*SDWAInst, AMDGPU::OpName::src1);
+ assert(Src1 && AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::src1));
+ if (Src1->isReg() && (Src1->getReg() == SrcMO.getReg())) {
+ MachineOperand *Src1Sel =
+ TII->getNamedOperand(*SDWAInst, AMDGPU::OpName::src1_sel);
+ assert(Src1Sel &&
+ AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::src1_sel));
+ Src1Sel->setImm(OpSel);
+
+ LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
+ return;
+ }
+}
+
+// Backtracks from the given parent MI, looking for use operands defined by
+// FP16 (SDWA-able) instructions, following each def chain upward.
+unsigned SIPeepholeSDWA::computeMIChainsForPackedOps(
+ MachineInstr *ParentMI, std::queue<MachineOperand *> &DefSrcQueue,
+ const GCNSubtarget &ST) {
+ unsigned NumOfFP16Def;
+ do {
+ MachineInstr *NextMIInChain = nullptr;
+ NumOfFP16Def = 0;
+    for (MachineOperand &CurrentMO : ParentMI->uses()) {
+      if (!CurrentMO.isReg() || CurrentMO.getReg().isPhysical() ||
+          !MRI->hasOneUse(CurrentMO.getReg()))
+        continue;
+
+      MachineOperand *DefCurrMO = findSingleRegDef(&CurrentMO, MRI);
+ if (!DefCurrMO)
+ continue;
+
+ MachineInstr *DefCurrMI = DefCurrMO->getParent();
+ if (!isSrcDestFP16Bits(DefCurrMI, TII) ||
+ !isConvertibleToSDWA(*DefCurrMI, ST, TII))
+ continue;
+
+ NextMIInChain = DefCurrMI;
+ DefSrcQueue.push(DefCurrMO);
+ NumOfFP16Def++;
+ }
+
+ if (NumOfFP16Def > 1)
+ break;
+
+ ParentMI = NextMIInChain;
+ } while (ParentMI);
+
+ return NumOfFP16Def;
+}
+
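+// Scans MBB for V_PACK_B32_F16 instructions whose two sources are produced by
+// parallel chains of FP16 (SDWA-able) instructions rooted at a common packed
+// source register. Such chains can be rewritten with SDWA dst_sel/src_sel so
+// that the explicit pack becomes unnecessary.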
+void SIPeepholeSDWA::eliminateFP16Packing(MachineBasicBlock &MBB,
+ const GCNSubtarget &ST) {
+ if (!ST.has16BitInsts())
+ return;
+
+ for (MachineInstr &MI : make_early_inc_range(MBB)) {
+ if (MI.getOpcode() == AMDGPU::V_PACK_B32_F16_e64) {
+ LLVM_DEBUG(dbgs() << "\nCandidate FP16 Packed MI : " << MI << '\n');
+ std::queue<MachineOperand *> DefSrc0Queue;
+ std::queue<MachineOperand *> DefSrc1Queue;
+ MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
+ MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
+
+ if (!Src0->isReg() || Src0->getReg().isPhysical() ||
+ !MRI->hasOneUse(Src0->getReg()) || !Src1->isReg() ||
+ Src1->getReg().isPhysical() || !MRI->hasOneUse(Src1->getReg()))
+ continue;
+
+ MachineOperand *Op0 = findSingleRegDef(Src0, MRI);
+ MachineOperand *Op1 = findSingleRegDef(Src1, MRI);
+
+ if (!Op0 || !Op1)
+ continue;
+
+ MachineInstr *ParentMIOp0 = Op0->getParent();
+ MachineInstr *ParentMIOp1 = Op1->getParent();
+
+ if (!isSrcDestFP16Bits(ParentMIOp0, TII) ||
+ !isSrcDestFP16Bits(ParentMIOp1, TII) ||
+ !isConvertibleToSDWA(*ParentMIOp0, ST, TII) ||
+ !isConvertibleToSDWA(*ParentMIOp1, ST, TII))
+ continue;
+
+ DefSrc0Queue.push(Op0);
+ DefSrc1Queue.push(Op1);
+
+      // Walk each chain, checking that every MI in it has exactly one
+      // register MO use that is defined by a pure FP16 instruction (which
+      // must itself be SDWA-able).
+      unsigned NumOfFP16Def =
+          computeMIChainsForPackedOps(ParentMIOp0, DefSrc0Queue, ST);
+ if (NumOfFP16Def > 1)
+ continue;
+
+ NumOfFP16Def = computeMIChainsForPackedOps(ParentMIOp1, DefSrc1Queue, ST);
+ if (NumOfFP16Def > 1)
+ continue;
+
+ MachineInstr *Def0RootMI = (DefSrc0Queue.back())->getParent();
+ MachineInstr *Def1RootMI = (DefSrc1Queue.back())->getParent();
+ Register SrcRootMOReg = AMDGPU::NoRegister;
+
+      // Now check whether the last operation in each DefSrcQueue shares a
+      // common MO; that MO is the source root of the element-wise FP16
+      // chain operations.
+ for (MachineOperand &Current0MO : Def0RootMI->uses()) {
+ if (!Current0MO.isReg() || Current0MO.getReg().isPhysical())
+ continue;
+
+ for (MachineOperand &Current1MO : Def1RootMI->uses()) {
+ if (!Current1MO.isReg() || Current1MO.getReg().isPhysical())
+ continue;
+
+ if (Current0MO.getReg() == Current1MO.getReg() &&
+ Current0MO.getSubReg() == Current1MO.getSubReg()) {
+ SrcRootMOReg = Current0MO.getReg();
+ break;
+ }
+ }
+        // Found it; no more checks needed.
+ if (SrcRootMOReg != AMDGPU::NoRegister)
+ break;
+ }
+
+ if (SrcRootMOReg == AMDGPU::NoRegister)
+ continue;
+
+      // Also ensure that the two DefXRootMIs access the lower and upper
+      // half-words of SrcRootMOReg respectively.
+ if (!checkForRightSrcRootAccess(Def0RootMI, Def1RootMI, SrcRootMOReg,
+ TII))
+ continue;
+
+      // The graph below represents the connections:
+      //            Op0Initial --> Op0x --> ... --> Op0Final
+      //           /                                        \
+      //  SrcRootMO                                          v_pack_b32_f16
+      //           \                                        /
+      //            Op1Initial --> Op1x --> ... --> Op1Final
+      // The nomenclature below is based on this flow graph.
+      //
+      // For each DefSrcXQueue:
+      // OpXInitial is at the back & OpXFinal is at the front.
+      auto Op0FinalMI = (DefSrc0Queue.front())->getParent();
+      auto Op1FinalMI = (DefSrc1Queue.front())->getParent();
+      auto Op0InitialMI = (DefSrc0Queue.back())->getParent();
+      auto Op1InitialMI = (DefSrc1Queue.back())->getParent();
+
+ MachineOperand *FinalOutMO = nullptr;
+ std::queue<MachineOperand *> ChainedDefOps;
+ AMDGPU::SDWA::SdwaSel OpSel = AMDGPU::SDWA::SdwaSel::DWORD;
+ int NumOfElemInSecondOpChain = 0;
+
+      // Now, if possible, reorder the flow according to the dominance of the
+      // MIs as follows, and store the result in ChainedDefOps so it can later
+      // be converted into its SDWA version:
+      //
+      // If dominates(Op0FinalMI, Op1InitialMI):
+      //   SrcRootMO -> Op0Initial -> Op0x -> ... -> Op0Final
+      //             -> Op1Initial -> Op1x -> ... -> Op1Final (FinalOutMO)
+      //
+      // If dominates(Op1FinalMI, Op0InitialMI):
+      //   SrcRootMO -> Op1Initial -> Op1x -> ... -> Op1Final
+      //             -> Op0Initial -> Op0x -> ... -> Op0Final (FinalOutMO)
+      //
+      // TODO: The remaining case is not handled! One such case arises when
+      // multiple FP16 instructions are chained on an FP16 vector input,
+      // for example:
+      //
+      //   %1 = call <2 x half> @llvm.log.v2f16(<2 x half> %0)
+      //   %res = call <2 x half> @llvm.sin.v2f16(<2 x half> %1)
+      //   return <2 x half> %res
+      if (dominates(Op0FinalMI, Op1InitialMI)) {
----------------
Pierre-vh wrote:
The code in both cases seems like a copy paste with minor changes. Can you try to canonicalize the order so it can all be handled in one go?
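For illustration, one way to canonicalize (a hypothetical, untested sketch
reusing the patch's names) would be to swap the two chains up front and keep a
single copy of the conversion logic:

    // Make chain 0 the dominating chain; bail out if the chains interleave
    // (the TODO case above).
    if (!dominates(Op0FinalMI, Op1InitialMI)) {
      if (!dominates(Op1FinalMI, Op0InitialMI))
        continue;
      std::swap(Op0FinalMI, Op1FinalMI);
      std::swap(Op0InitialMI, Op1InitialMI);
      std::swap(DefSrc0Queue, DefSrc1Queue);
      // Note: any WORD_0/WORD_1 dst_sel bookkeeping must flip with the swap.
    }
    // ... single conversion path follows ...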
https://github.com/llvm/llvm-project/pull/137137