[llvm] [AMDGPU] Implement vop3p complex pattern optimization for gisel (PR #130234)
via llvm-commits
llvm-commits at lists.llvm.org
Sun Mar 9 22:32:26 PDT 2025
================
@@ -4282,44 +4282,364 @@ AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
}};
}
-std::pair<Register, unsigned>
-AMDGPUInstructionSelector::selectVOP3PModsImpl(
- Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const {
- unsigned Mods = 0;
- MachineInstr *MI = MRI.getVRegDef(Src);
+enum srcStatus {
+ IS_SAME,
+ IS_UPPER_HALF,
+ IS_LOWER_HALF,
+ IS_NEG,
+ IS_UPPER_HALF_NEG,
+ IS_LOWER_HALF_NEG,
+ LAST_STAT = IS_LOWER_HALF_NEG
+};
+
+bool isTruncHalf(MachineInstr *MI, const MachineRegisterInfo &MRI) {
+ assert(MI->getOpcode() == AMDGPU::G_TRUNC);
+ unsigned dstSize = MRI.getType(MI->getOperand(0).getReg()).getSizeInBits();
+ unsigned srcSize = MRI.getType(MI->getOperand(1).getReg()).getSizeInBits();
+ return dstSize * 2 == srcSize;
+}
+
+bool isLshrHalf(MachineInstr *MI, const MachineRegisterInfo &MRI) {
+ assert(MI->getOpcode() == AMDGPU::G_LSHR);
+ Register ShiftSrc;
+ std::optional<ValueAndVReg> ShiftAmt;
+ if (mi_match(MI->getOperand(0).getReg(), MRI,
+ m_GLShr(m_Reg(ShiftSrc), m_GCst(ShiftAmt)))) {
+ unsigned srcSize = MRI.getType(MI->getOperand(1).getReg()).getSizeInBits();
+ unsigned shift = ShiftAmt->Value.getZExtValue();
+ return shift * 2 == srcSize;
+ }
+ return false;
+}
- if (MI->getOpcode() == AMDGPU::G_FNEG &&
- // It's possible to see an f32 fneg here, but unlikely.
- // TODO: Treat f32 fneg as only high bit.
- MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
- Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
- Src = MI->getOperand(1).getReg();
- MI = MRI.getVRegDef(Src);
+bool isShlHalf(MachineInstr *MI, const MachineRegisterInfo &MRI) {
+ assert(MI->getOpcode() == AMDGPU::G_SHL);
+ Register ShiftSrc;
+ std::optional<ValueAndVReg> ShiftAmt;
+ if (mi_match(MI->getOperand(0).getReg(), MRI,
+ m_GShl(m_Reg(ShiftSrc), m_GCst(ShiftAmt)))) {
+ unsigned srcSize = MRI.getType(MI->getOperand(1).getReg()).getSizeInBits();
+ unsigned shift = ShiftAmt->Value.getZExtValue();
+ return shift * 2 == srcSize;
+ }
+ return false;
+}
+
+bool retOpStat(MachineOperand *Op, int stat,
----------------
Shoreshen wrote:
Hi @arsenm , this is because I have to look through COPY for cases that copy from SGPR to VGPR (this usually happens after build_vector).
While COPY can also be used to copy from physical registers, I need to prevent physical registers from being selected, since they are not SSA (e.g. a physical register used as a return value can cause an infinite loop).
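To illustrate the kind of guard I mean, here is a minimal sketch (the helper name lookThroughCopy and its exact shape are my own illustration, not the code in this PR, and it assumes the surrounding context of AMDGPUInstructionSelector.cpp): only follow a COPY whose source is a virtual register, so physical registers, which are not in SSA form, are never chased.

    // Sketch only: look through a COPY, but stop at physical registers since
    // they are not SSA and chasing them can loop forever.
    static Register lookThroughCopy(Register Reg, const MachineRegisterInfo &MRI) {
      MachineInstr *MI = MRI.getVRegDef(Reg);
      if (MI && MI->getOpcode() == AMDGPU::COPY) {
        Register Src = MI->getOperand(1).getReg();
        if (Src.isVirtual()) // e.g. a VGPR copy of an SGPR after build_vector
          return Src;
      }
      return Reg;
    }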
https://github.com/llvm/llvm-project/pull/130234
More information about the llvm-commits mailing list