[llvm] [AMDGPU] Implement vop3p complex pattern optimization for gisel (PR #130234)

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Sun Mar 9 20:06:12 PDT 2025


================
@@ -4282,44 +4282,364 @@ AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
   }};
 }
 
-std::pair<Register, unsigned>
-AMDGPUInstructionSelector::selectVOP3PModsImpl(
-  Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const {
-  unsigned Mods = 0;
-  MachineInstr *MI = MRI.getVRegDef(Src);
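+// How a traced value relates to the original source operand: unchanged,
+// negated, or the upper/lower half of a double-width value, possibly negated.
+// e.g. tracing through "%h:(s16) = G_TRUNC %w:(s32)" turns IS_SAME into
+// IS_LOWER_HALF.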
+enum srcStatus {
+  IS_SAME,
+  IS_UPPER_HALF,
+  IS_LOWER_HALF,
+  IS_NEG,
+  IS_UPPER_HALF_NEG,
+  IS_LOWER_HALF_NEG,
+  LAST_STAT = IS_LOWER_HALF_NEG
+};
+
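+// Helpers recognizing operations that select one half of a value: a trunc
+// to half the source width, or a shift by exactly half the bit width.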
+bool isTruncHalf(MachineInstr *MI, const MachineRegisterInfo &MRI) {
+  assert(MI->getOpcode() == AMDGPU::G_TRUNC);
+  unsigned dstSize = MRI.getType(MI->getOperand(0).getReg()).getSizeInBits();
+  unsigned srcSize = MRI.getType(MI->getOperand(1).getReg()).getSizeInBits();
+  return dstSize * 2 == srcSize;
+}
+
+bool isLshrHalf(MachineInstr *MI, const MachineRegisterInfo &MRI) {
+  assert(MI->getOpcode() == AMDGPU::G_LSHR);
+  Register ShiftSrc;
+  std::optional<ValueAndVReg> ShiftAmt;
+  if (mi_match(MI->getOperand(0).getReg(), MRI,
+               m_GLShr(m_Reg(ShiftSrc), m_GCst(ShiftAmt)))) {
+    unsigned srcSize = MRI.getType(MI->getOperand(1).getReg()).getSizeInBits();
+    unsigned shift = ShiftAmt->Value.getZExtValue();
+    return shift * 2 == srcSize;
+  }
+  return false;
+}
 
-  if (MI->getOpcode() == AMDGPU::G_FNEG &&
-      // It's possible to see an f32 fneg here, but unlikely.
-      // TODO: Treat f32 fneg as only high bit.
-      MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
-    Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
-    Src = MI->getOperand(1).getReg();
-    MI = MRI.getVRegDef(Src);
+bool isShlHalf(MachineInstr *MI, const MachineRegisterInfo &MRI) {
+  assert(MI->getOpcode() == AMDGPU::G_SHL);
+  Register ShiftSrc;
+  std::optional<ValueAndVReg> ShiftAmt;
+  if (mi_match(MI->getOperand(0).getReg(), MRI,
+               m_GShl(m_Reg(ShiftSrc), m_GCst(ShiftAmt)))) {
+    unsigned srcSize = MRI.getType(MI->getOperand(1).getReg()).getSizeInBits();
+    unsigned shift = ShiftAmt->Value.getZExtValue();
+    return shift * 2 == srcSize;
+  }
+  return false;
+}
+
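+// Records (Op, stat) into curr if Op is a virtual register or an immediate;
+// physical registers are rejected.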
+bool retOpStat(MachineOperand *Op, int stat,
+               std::pair<MachineOperand *, int> &curr) {
+  if ((Op->isReg() && !(Op->getReg().isPhysical())) || Op->isImm() ||
+      Op->isCImm() || Op->isFPImm()) {
+    curr = {Op, stat};
+    return true;
+  }
+  return false;
+}
+
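+// Advances curr one step up its def chain, rewriting the (operand, status)
+// pair according to the defining instruction. Returns false if the trace
+// cannot continue.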
+bool calcNextStatus(std::pair<MachineOperand *, int> &curr,
+                    const MachineRegisterInfo &MRI) {
+  if (!curr.first->isReg()) {
+    return false;
+  }
+  MachineInstr *MI = nullptr;
+
+  if (!curr.first->isDef()) {
+    // MRI.getVRegDef can loop infinitely if called on a def operand's
+    // register, so only use it for use operands.
+    MI = MRI.getVRegDef(curr.first->getReg());
+  } else {
+    MI = curr.first->getParent();
+  }
+  if (!MI) {
+    return false;
+  }
+
+  unsigned Opc = MI->getOpcode();
+
+  // Handle general Opc cases
+  switch (Opc) {
+  case AMDGPU::G_BITCAST:
+  case AMDGPU::G_CONSTANT:
+  case AMDGPU::G_FCONSTANT:
+  case AMDGPU::COPY:
+    return retOpStat(&MI->getOperand(1), curr.second, curr);
+  case AMDGPU::G_FNEG:
+    // Toggle negation: adding (LAST_STAT + 1) / 2 = 3 modulo
+    // (LAST_STAT + 1) = 6 maps XXXX to XXXX_NEG and back.
+    return retOpStat(&MI->getOperand(1),
+                     (curr.second + ((LAST_STAT + 1) / 2)) % (LAST_STAT + 1),
+                     curr);
+  }
+
+  // Calc next stat from current stat
+  switch (curr.second) {
+  case IS_SAME:
+    switch (Opc) {
+    case AMDGPU::G_TRUNC: {
+      if (isTruncHalf(MI, MRI)) {
+        return retOpStat(&MI->getOperand(1), IS_LOWER_HALF, curr);
+      }
+      break;
+    }
+    }
+    break;
+  case IS_NEG:
+    switch (Opc) {
+    case AMDGPU::G_TRUNC: {
+      if (isTruncHalf(MI, MRI)) {
+        return retOpStat(&MI->getOperand(1), IS_LOWER_HALF_NEG, curr);
+      }
+      break;
+    }
+    }
+    break;
+  case IS_UPPER_HALF:
+    switch (Opc) {
+    case AMDGPU::G_SHL: {
+      if (isShlHalf(MI, MRI)) {
+        return retOpStat(&MI->getOperand(1), IS_LOWER_HALF, curr);
+      }
+      break;
+    }
+    }
+    break;
+  case IS_LOWER_HALF:
+    switch (Opc) {
+    case AMDGPU::G_LSHR: {
+      if (isLshrHalf(MI, MRI)) {
+        return retOpStat(&MI->getOperand(1), IS_UPPER_HALF, curr);
+      }
+      break;
+    }
+    }
+    break;
+  case IS_UPPER_HALF_NEG:
+    switch (Opc) {
+    case AMDGPU::G_SHL: {
+      if (isShlHalf(MI, MRI)) {
+        return retOpStat(&MI->getOperand(1), IS_LOWER_HALF_NEG, curr);
+      }
+      break;
+    }
+    }
+    break;
+  case IS_LOWER_HALF_NEG:
+    switch (Opc) {
+    case AMDGPU::G_LSHR: {
+      if (isLshrHalf(MI, MRI)) {
+        return retOpStat(&MI->getOperand(1), IS_UPPER_HALF_NEG, curr);
+      }
+      break;
+    }
+    }
+    break;
+  }
+  return false;
+}
+
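+// Walks the def chain of Op for up to maxDepth steps, recording an
+// (operand, status) pair for each step. With onlyLastSameOrNeg set, the
+// walk stops once the status leaves IS_SAME/IS_NEG and only the final
+// pair is returned.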
+std::vector<std::pair<MachineOperand *, int>>
+getSrcStats(MachineOperand *Op, const MachineRegisterInfo &MRI,
+            bool onlyLastSameOrNeg = false, int maxDepth = 6) {
+  int depth = 0;
+  std::pair<MachineOperand *, int> curr = {Op, IS_SAME};
+  std::vector<std::pair<MachineOperand *, int>> statList;
+
+  while (true) {
+    depth++;
+    if (depth > maxDepth) {
+      break;
+    }
+    bool ret = calcNextStatus(curr, MRI);
+    if (!ret || (onlyLastSameOrNeg &&
+                 (curr.second != IS_SAME && curr.second != IS_NEG))) {
+      break;
+    } else if (!onlyLastSameOrNeg) {
+      statList.push_back(curr);
+    }
   }
+  if (onlyLastSameOrNeg) {
+    statList.push_back(curr);
+  }
+  return statList;
+}
 
-  // TODO: Handle G_FSUB 0 as fneg
+bool isInlinableConstant(MachineOperand *Op, const SIInstrInfo &TII) {
+  switch (Op->getType()) {
+  case MachineOperand::MachineOperandType::MO_Immediate:
+    return TII.isInlineConstant(*Op);
+  case MachineOperand::MachineOperandType::MO_CImmediate:
+    return TII.isInlineConstant(Op->getCImm()->getValue());
+  case MachineOperand::MachineOperandType::MO_FPImmediate:
+    return TII.isInlineConstant(Op->getFPImm()->getValueAPF());
+  default:
+    return false;
+  }
+}
 
-  // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
-  (void)IsDOT; // DOTs do not use OPSEL on gfx942+, check ST.hasDOTOpSelHazard()
+bool isSameBitWidth(MachineOperand *Op1, MachineOperand *Op2,
+                    const MachineRegisterInfo &MRI) {
+  unsigned width1 = MRI.getType(Op1->getReg()).getSizeInBits();
+  unsigned width2 = MRI.getType(Op2->getReg()).getSizeInBits();
+  return width1 == width2;
+}
 
+bool isSameOperand(MachineOperand *Op1, MachineOperand *Op2) {
+  if (Op1->isReg()) {
+    if (Op2->isReg()) {
+      return Op1->getReg() == Op2->getReg();
+    }
+    return false;
+  }
+  return Op1->isIdenticalTo(*Op2);
+}
+
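+// Returns true if the traced high/low halves can be folded into the single
+// source operand newOp, accumulating the required neg/op_sel bits in Mods.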
+bool validToPack(int HiStat, int LoStat, unsigned int &Mods,
+                 MachineOperand *newOp, MachineOperand *RootOp,
+                 const SIInstrInfo &TII, const MachineRegisterInfo &MRI) {
+  if (newOp->isReg()) {
+    if (isSameBitWidth(newOp, RootOp, MRI)) {
+      // IS_LOWER_HALF leaves the modifier bits at 0.
+      if (HiStat == IS_UPPER_HALF_NEG) {
+        Mods ^= SISrcMods::NEG_HI;
+        Mods |= SISrcMods::OP_SEL_1;
+      } else if (HiStat == IS_UPPER_HALF) {
+        Mods |= SISrcMods::OP_SEL_1;
+      } else if (HiStat == IS_LOWER_HALF_NEG) {
+        Mods ^= SISrcMods::NEG_HI;
+      }
+      if (LoStat == IS_UPPER_HALF_NEG) {
+        Mods ^= SISrcMods::NEG;
+        Mods |= SISrcMods::OP_SEL_0;
+      } else if (LoStat == IS_UPPER_HALF) {
+        Mods |= SISrcMods::OP_SEL_0;
+      } else if (LoStat == IS_LOWER_HALF_NEG) {
+        Mods ^= SISrcMods::NEG;
+      }
+      return true;
+    }
+  } else {
+    if ((HiStat == IS_SAME || HiStat == IS_NEG) &&
+        (LoStat == IS_SAME || LoStat == IS_NEG) &&
+        isInlinableConstant(newOp, TII)) {
+      if (HiStat == IS_NEG) {
+        Mods ^= SISrcMods::NEG_HI;
+      }
+      if (LoStat == IS_NEG) {
+        Mods ^= SISrcMods::NEG;
+      }
+      // opsel = opsel_hi = 0, since the upper and lower halves are both the
+      // same as the target inlinable constant.
+      return true;
+    }
+  }
+  return false;
+}
+
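+// Selects the source operand and VOP3P modifiers (neg, op_sel) for Op,
+// folding fneg chains and, for G_BUILD_VECTOR sources, packing both halves
+// into one operand when they trace back to the same value.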
+std::pair<MachineOperand *, unsigned>
+AMDGPUInstructionSelector::selectVOP3PModsImpl(MachineOperand *Op,
+                                               const MachineRegisterInfo &MRI,
+                                               bool IsDOT) const {
+  unsigned Mods = 0;
+  MachineOperand *RootOp = Op;
+  std::pair<MachineOperand *, int> stat = getSrcStats(Op, MRI, true)[0];
+  if (!stat.first->isReg()) {
+    Mods |= SISrcMods::OP_SEL_1;
+    return {Op, Mods};
+  }
+  if (stat.second == IS_NEG) {
+    Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
+  }
+  Op = stat.first;
+  MachineInstr *MI = MRI.getVRegDef(Op->getReg());
+  if (MI->getOpcode() == AMDGPU::G_BUILD_VECTOR && MI->getNumOperands() == 3 &&
+      (!IsDOT || !Subtarget->hasDOTOpSelHazard())) {
+    std::vector<std::pair<MachineOperand *, int>> statList_Hi =
+        getSrcStats(&MI->getOperand(2), MRI);
+    if (!statList_Hi.empty()) {
+      std::vector<std::pair<MachineOperand *, int>> statList_Lo =
+          getSrcStats(&MI->getOperand(1), MRI);
+      if (!statList_Lo.empty()) {
+        for (int i = statList_Hi.size() - 1; i >= 0; i--) {
+          for (int j = statList_Lo.size() - 1; j >= 0; j--) {
+            if (isSameOperand(statList_Hi[i].first, statList_Lo[j].first)) {
+              if (validToPack(statList_Hi[i].second, statList_Lo[j].second,
+                              Mods, statList_Hi[i].first, RootOp, TII, MRI)) {
+                return {statList_Hi[i].first, Mods};
+              }
+            }
+          }
+        }
+      }
+    }
+  }
   // Packed instructions do not have abs modifiers.
   Mods |= SISrcMods::OP_SEL_1;
 
-  return std::pair(Src, Mods);
+  return {Op, Mods};
+}
+
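+// Returns the value of an immediate operand of any kind (Imm, CImm, or the
+// bit pattern of an FPImm) as a signed 64-bit integer.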
+int64_t getAllKindImm(MachineOperand *Op) {
+  switch (Op->getType()) {
+  case MachineOperand::MachineOperandType::MO_Immediate:
+    return Op->getImm();
+  case MachineOperand::MachineOperandType::MO_CImmediate:
+    return Op->getCImm()->getSExtValue();
+  case MachineOperand::MachineOperandType::MO_FPImmediate:
+    return Op->getFPImm()->getValueAPF().bitcastToAPInt().getSExtValue();
+  default:
+    break;
+  }
+  llvm_unreachable("not an imm type");
+}
+
+bool checkRB(MachineOperand *Op, int RBNo, const AMDGPURegisterBankInfo &RBI,
+             const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) {
+  const RegisterBank *RB = RBI.getRegBank(Op->getReg(), MRI, TRI);
+  return RB->getID() == RBNo;
+}
+
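+// Returns an operand whose register bank is compatible with RootOp's,
+// inserting a COPY into a fresh vreg when necessary.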
+MachineOperand *getVReg(MachineOperand *newOp, MachineOperand *RootOp,
+                        const AMDGPURegisterBankInfo &RBI,
+                        MachineRegisterInfo &MRI,
+                        const TargetRegisterInfo &TRI) {
+  // RootOp can only be VGPR or SGPR (some hand-written cases such as
+  // inst-select-ashr.v2s16.mir::ashr_v2s16_vs).
+  if (checkRB(RootOp, AMDGPU::SGPRRegBankID, RBI, MRI, TRI) ||
+      checkRB(newOp, AMDGPU::VGPRRegBankID, RBI, MRI, TRI)) {
+    return newOp;
+  }
+  MachineInstr *MI = MRI.getVRegDef(RootOp->getReg());
+  if (MI->getOpcode() == AMDGPU::COPY &&
+      isSameOperand(newOp, &MI->getOperand(1))) {
+    // RootOp is VGPR, newOp is not VGPR, but RootOp = COPY newOp
+    return RootOp;
+  }
+
+  const TargetRegisterClass *DstRC =
+      TRI.getConstrainedRegClassForOperand(*RootOp, MRI);
+  Register dstReg = MRI.createVirtualRegister(DstRC);
+
+  MachineIRBuilder B(*RootOp->getParent());
----------------
arsenm wrote:

Don't create temporary MachineIRBuilders. The final selector should just use BuildMI 
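
A minimal sketch of the BuildMI form (illustrative only; it reuses DstRC,
newOp, and RootOp from the patch above and assumes the COPY is inserted
right before the instruction that owns RootOp):

  MachineInstr *InsertPt = RootOp->getParent();
  Register DstReg = MRI.createVirtualRegister(DstRC);
  BuildMI(*InsertPt->getParent(), *InsertPt, InsertPt->getDebugLoc(),
          TII.get(AMDGPU::COPY), DstReg)
      .add(*newOp);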

https://github.com/llvm/llvm-project/pull/130234

