[llvm] [RISCV][NFC] Convert some predicates to TIIPredicate (PR #129658)

Pengcheng Wang via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 22 06:13:06 PDT 2025


https://github.com/wangpc-pp updated https://github.com/llvm/llvm-project/pull/129658

From d94e5e9a57f284fd86a749f294bc6d3f97c04440 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Tue, 4 Mar 2025 14:52:29 +0800
Subject: [PATCH 1/2] [RISCV] Convert some predicates to TIIPredicate

These predicates can also be used in macro fusion and in scheduling
models.
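
For context, TableGen expands each TIIPredicate into a static helper on
RISCVInstrInfo, declared under GET_INSTRINFO_HELPER_DECLS and defined
under GET_INSTRINFO_HELPERS in RISCVGenInstrInfo.inc. As a rough sketch
(approximate shape only, not verbatim TableGen output), the isSEXT_W
predicate should expand to the equivalent of the C++ it replaces:

    // Approximate shape of the generated helper in RISCVGenInstrInfo.inc;
    // it performs the same opcode and operand checks as the old function.
    bool RISCVInstrInfo::isSEXT_W(const MachineInstr &MI) {
      return MI.getOpcode() == RISCV::ADDIW && MI.getOperand(1).isReg() &&
             MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
    }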
---
 llvm/lib/Target/RISCV/RISCV.td                |   6 +
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  21 +--
 llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp  | 118 +++------------
 .../lib/Target/RISCV/RISCVInsertWriteVXRM.cpp |  15 +-
 llvm/lib/Target/RISCV/RISCVInstrInfo.cpp      |  27 +---
 llvm/lib/Target/RISCV/RISCVInstrInfo.h        |   7 +-
 llvm/lib/Target/RISCV/RISCVInstrPredicates.td | 143 ++++++++++++++++++
 llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp     |   2 +-
 8 files changed, 186 insertions(+), 153 deletions(-)
 create mode 100644 llvm/lib/Target/RISCV/RISCVInstrPredicates.td

diff --git a/llvm/lib/Target/RISCV/RISCV.td b/llvm/lib/Target/RISCV/RISCV.td
index 2c2271e486a84..6ecf6460d1e32 100644
--- a/llvm/lib/Target/RISCV/RISCV.td
+++ b/llvm/lib/Target/RISCV/RISCV.td
@@ -36,6 +36,12 @@ include "RISCVCallingConv.td"
 include "RISCVInstrInfo.td"
 include "GISel/RISCVRegisterBanks.td"
 
+//===----------------------------------------------------------------------===//
+// Instruction predicates
+//===----------------------------------------------------------------------===//
+
+include "RISCVInstrPredicates.td"
+
 //===----------------------------------------------------------------------===//
 // RISC-V macro fusions.
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 850ea647ea049..70a3c616e37a0 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -20438,23 +20438,6 @@ static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
   return BB;
 }
 
-static bool isSelectPseudo(MachineInstr &MI) {
-  switch (MI.getOpcode()) {
-  default:
-    return false;
-  case RISCV::Select_GPR_Using_CC_GPR:
-  case RISCV::Select_GPR_Using_CC_Imm:
-  case RISCV::Select_FPR16_Using_CC_GPR:
-  case RISCV::Select_FPR16INX_Using_CC_GPR:
-  case RISCV::Select_FPR32_Using_CC_GPR:
-  case RISCV::Select_FPR32INX_Using_CC_GPR:
-  case RISCV::Select_FPR64_Using_CC_GPR:
-  case RISCV::Select_FPR64INX_Using_CC_GPR:
-  case RISCV::Select_FPR64IN32X_Using_CC_GPR:
-    return true;
-  }
-}
-
 static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
                                         unsigned RelOpcode, unsigned EqOpcode,
                                         const RISCVSubtarget &Subtarget) {
@@ -20650,7 +20633,7 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
        SequenceMBBI != E; ++SequenceMBBI) {
     if (SequenceMBBI->isDebugInstr())
       continue;
-    if (isSelectPseudo(*SequenceMBBI)) {
+    if (RISCVInstrInfo::isSelectPseudo(*SequenceMBBI)) {
       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
           !SequenceMBBI->getOperand(2).isReg() ||
           SequenceMBBI->getOperand(2).getReg() != RHS ||
@@ -20727,7 +20710,7 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
   auto InsertionPoint = TailMBB->begin();
   while (SelectMBBI != SelectEnd) {
     auto Next = std::next(SelectMBBI);
-    if (isSelectPseudo(*SelectMBBI)) {
+    if (RISCVInstrInfo::isSelectPseudo(*SelectMBBI)) {
       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 2247610c21ffb..a68da419e1238 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -69,74 +69,6 @@ static unsigned getSEWOpNum(const MachineInstr &MI) {
   return RISCVII::getSEWOpNum(MI.getDesc());
 }
 
-static bool isVectorConfigInstr(const MachineInstr &MI) {
-  return MI.getOpcode() == RISCV::PseudoVSETVLI ||
-         MI.getOpcode() == RISCV::PseudoVSETVLIX0 ||
-         MI.getOpcode() == RISCV::PseudoVSETIVLI;
-}
-
-/// Return true if this is 'vsetvli x0, x0, vtype' which preserves
-/// VL and only sets VTYPE.
-static bool isVLPreservingConfig(const MachineInstr &MI) {
-  if (MI.getOpcode() != RISCV::PseudoVSETVLIX0)
-    return false;
-  assert(RISCV::X0 == MI.getOperand(1).getReg());
-  return RISCV::X0 == MI.getOperand(0).getReg();
-}
-
-static bool isFloatScalarMoveOrScalarSplatInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VFMV_S_F:
-  case RISCV::VFMV_V_F:
-    return true;
-  }
-}
-
-static bool isScalarExtractInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_X_S:
-  case RISCV::VFMV_F_S:
-    return true;
-  }
-}
-
-static bool isScalarInsertInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_S_X:
-  case RISCV::VFMV_S_F:
-    return true;
-  }
-}
-
-static bool isScalarSplatInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_V_I:
-  case RISCV::VMV_V_X:
-  case RISCV::VFMV_V_F:
-    return true;
-  }
-}
-
-static bool isVSlideInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VSLIDEDOWN_VX:
-  case RISCV::VSLIDEDOWN_VI:
-  case RISCV::VSLIDEUP_VX:
-  case RISCV::VSLIDEUP_VI:
-    return true;
-  }
-}
-
 /// Get the EEW for a load or store instruction.  Return std::nullopt if MI is
 /// not a load or store which ignores SEW.
 static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
@@ -166,13 +98,6 @@ static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
   }
 }
 
-static bool isNonZeroLoadImmediate(const MachineInstr &MI) {
-  return MI.getOpcode() == RISCV::ADDI &&
-    MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
-    MI.getOperand(1).getReg() == RISCV::X0 &&
-    MI.getOperand(2).getImm() != 0;
-}
-
 /// Return true if this is an operation on mask registers.  Note that
 /// this includes both arithmetic/logical ops and load/store (vlm/vsm).
 static bool isMaskRegOp(const MachineInstr &MI) {
@@ -458,7 +383,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   }
 
   // For vmv.s.x and vfmv.s.f, there are only two behaviors, VL = 0 and VL > 0.
-  if (isScalarInsertInstr(MI)) {
+  if (RISCVInstrInfo::isScalarInsertInstr(MI)) {
     Res.LMUL = DemandedFields::LMULNone;
     Res.SEWLMULRatio = false;
     Res.VLAny = false;
@@ -469,7 +394,8 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     // tail lanes to either be the original value or -1.  We are writing
     // unknown bits to the lanes here.
     if (hasUndefinedPassthru(MI)) {
-      if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
+      if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
+          !ST->hasVInstructionsF64())
         Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
       else
         Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
@@ -478,7 +404,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   }
 
  // vmv.x.s and vfmv.f.s are unconditional and ignore everything except SEW.
-  if (isScalarExtractInstr(MI)) {
+  if (RISCVInstrInfo::isScalarExtractInstr(MI)) {
     assert(!RISCVII::hasVLOp(TSFlags));
     Res.LMUL = DemandedFields::LMULNone;
     Res.SEWLMULRatio = false;
@@ -496,8 +422,8 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     //   non-zero VL.  We could generalize this if we had a VL > C predicate.
     // * The LMUL1 restriction is for machines whose latency may depend on VL.
     // * As above, this is only legal for tail "undefined" not "agnostic".
-    if (isVSlideInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
-        hasUndefinedPassthru(MI)) {
+    if (RISCVInstrInfo::isVSlideInstr(MI) && VLOp.isImm() &&
+        VLOp.getImm() == 1 && hasUndefinedPassthru(MI)) {
       Res.VLAny = false;
       Res.VLZeroness = true;
       Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
@@ -510,12 +436,13 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     // its place. Since a splat is non-constant time in LMUL, we do need to be
     // careful to not increase the number of active vector registers (unlike for
     // vmv.s.x.)
-    if (isScalarSplatInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
-        hasUndefinedPassthru(MI)) {
+    if (RISCVInstrInfo::isScalarSplatInstr(MI) && VLOp.isImm() &&
+        VLOp.getImm() == 1 && hasUndefinedPassthru(MI)) {
       Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
       Res.SEWLMULRatio = false;
       Res.VLAny = false;
-      if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
+      if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
+          !ST->hasVInstructionsF64())
         Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
       else
         Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
@@ -651,7 +578,7 @@ class VSETVLIInfo {
       return getAVLImm() > 0;
     if (hasAVLReg()) {
       if (auto *DefMI = getAVLDefMI(LIS))
-        return isNonZeroLoadImmediate(*DefMI);
+        return RISCVInstrInfo::isNonZeroLoadImmediate(*DefMI);
     }
     if (hasAVLVLMAX())
       return true;
@@ -979,7 +906,7 @@ void RISCVInsertVSETVLI::forwardVSETVLIAVL(VSETVLIInfo &Info) const {
   if (!Info.hasAVLReg())
     return;
   const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
-  if (!DefMI || !isVectorConfigInstr(*DefMI))
+  if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
     return;
   VSETVLIInfo DefInstrInfo = getInfoForVSETVLI(*DefMI);
   if (!DefInstrInfo.hasSameVLMAX(Info))
@@ -1085,7 +1012,7 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
       InstrInfo.setAVLRegDef(VNI, VLOp.getReg());
     }
   } else {
-    assert(isScalarExtractInstr(MI));
+    assert(RISCVInstrInfo::isScalarExtractInstr(MI));
     // Pick a random value for state tracking purposes; it will be ignored via
     // the demanded fields mechanism.
     InstrInfo.setAVLImm(1);
@@ -1126,7 +1053,7 @@ void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB,
     // same, we can use the X0, X0 form.
     if (Info.hasSameVLMAX(PrevInfo) && Info.hasAVLReg()) {
       if (const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
-          DefMI && isVectorConfigInstr(*DefMI)) {
+          DefMI && RISCVInstrInfo::isVectorConfigInstr(*DefMI)) {
         VSETVLIInfo DefInfo = getInfoForVSETVLI(*DefMI);
         if (DefInfo.hasSameAVL(PrevInfo) && DefInfo.hasSameVLMAX(PrevInfo)) {
           auto MI = BuildMI(MBB, InsertPt, DL, TII->get(RISCV::PseudoVSETVLIX0))
@@ -1304,7 +1231,7 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info,
 // reflect the changes MI might make.
 void RISCVInsertVSETVLI::transferAfter(VSETVLIInfo &Info,
                                        const MachineInstr &MI) const {
-  if (isVectorConfigInstr(MI)) {
+  if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
     Info = getInfoForVSETVLI(MI);
     return;
   }
@@ -1339,7 +1266,8 @@ bool RISCVInsertVSETVLI::computeVLVTYPEChanges(const MachineBasicBlock &MBB,
   for (const MachineInstr &MI : MBB) {
     transferBefore(Info, MI);
 
-    if (isVectorConfigInstr(MI) || RISCVII::hasSEWOp(MI.getDesc().TSFlags) ||
+    if (RISCVInstrInfo::isVectorConfigInstr(MI) ||
+        RISCVII::hasSEWOp(MI.getDesc().TSFlags) ||
         isVectorCopy(ST->getRegisterInfo(), MI))
       HadVectorOp = true;
 
@@ -1429,7 +1357,7 @@ bool RISCVInsertVSETVLI::needVSETVLIPHI(const VSETVLIInfo &Require,
     if (!Value)
       return true;
     MachineInstr *DefMI = LIS->getInstructionFromIndex(Value->def);
-    if (!DefMI || !isVectorConfigInstr(*DefMI))
+    if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
       return true;
 
     // We found a VSET(I)VLI; make sure it matches the output of the
@@ -1460,7 +1388,7 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
     transferBefore(CurInfo, MI);
 
     // If this is an explicit VSETVLI or VSETIVLI, update our state.
-    if (isVectorConfigInstr(MI)) {
+    if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
       // Conservatively, mark the VL and VTYPE as live.
       assert(MI.getOperand(3).getReg() == RISCV::VL &&
              MI.getOperand(4).getReg() == RISCV::VTYPE &&
@@ -1667,12 +1595,12 @@ bool RISCVInsertVSETVLI::canMutatePriorConfig(
   // If the VL values aren't equal, return false if either a) the former is
   // demanded, or b) we can't rewrite the former to be the latter for
   // implementation reasons.
-  if (!isVLPreservingConfig(MI)) {
+  if (!RISCVInstrInfo::isVLPreservingConfig(MI)) {
     if (Used.VLAny)
       return false;
 
     if (Used.VLZeroness) {
-      if (isVLPreservingConfig(PrevMI))
+      if (RISCVInstrInfo::isVLPreservingConfig(PrevMI))
         return false;
       if (!getInfoForVSETVLI(PrevMI).hasEquallyZeroAVL(getInfoForVSETVLI(MI),
                                                        LIS))
@@ -1723,7 +1651,7 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
 
   for (MachineInstr &MI : make_early_inc_range(reverse(MBB))) {
 
-    if (!isVectorConfigInstr(MI)) {
+    if (!RISCVInstrInfo::isVectorConfigInstr(MI)) {
       Used.doUnion(getDemanded(MI, ST));
       if (MI.isCall() || MI.isInlineAsm() ||
           MI.modifiesRegister(RISCV::VL, /*TRI=*/nullptr) ||
@@ -1747,7 +1675,7 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
       }
 
       if (canMutatePriorConfig(MI, *NextMI, Used)) {
-        if (!isVLPreservingConfig(*NextMI)) {
+        if (!RISCVInstrInfo::isVLPreservingConfig(*NextMI)) {
           Register DefReg = NextMI->getOperand(0).getReg();
 
           MI.getOperand(0).setReg(DefReg);
diff --git a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
index 7df04fc225b0b..6363da6613e64 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
@@ -27,6 +27,7 @@
 
 #include "MCTargetDesc/RISCVBaseInfo.h"
 #include "RISCV.h"
+#include "RISCVInstrInfo.h"
 #include "RISCVSubtarget.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include <queue>
@@ -227,23 +228,13 @@ char RISCVInsertWriteVXRM::ID = 0;
 INITIALIZE_PASS(RISCVInsertWriteVXRM, DEBUG_TYPE, RISCV_INSERT_WRITE_VXRM_NAME,
                 false, false)
 
-static bool ignoresVXRM(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VNCLIP_WI:
-  case RISCV::VNCLIPU_WI:
-    return MI.getOperand(3).getImm() == 0;
-  }
-}
-
 bool RISCVInsertWriteVXRM::computeVXRMChanges(const MachineBasicBlock &MBB) {
   BlockData &BBInfo = BlockInfo[MBB.getNumber()];
 
   bool NeedVXRMWrite = false;
   for (const MachineInstr &MI : MBB) {
     int VXRMIdx = RISCVII::getVXRMOpNum(MI.getDesc());
-    if (VXRMIdx >= 0 && !ignoresVXRM(MI)) {
+    if (VXRMIdx >= 0 && !RISCVInstrInfo::ignoresVXRM(MI)) {
       unsigned NewVXRMImm = MI.getOperand(VXRMIdx).getImm();
 
       if (!BBInfo.VXRMUse.isValid())
@@ -401,7 +392,7 @@ void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) {
 
   for (MachineInstr &MI : MBB) {
     int VXRMIdx = RISCVII::getVXRMOpNum(MI.getDesc());
-    if (VXRMIdx >= 0 && !ignoresVXRM(MI)) {
+    if (VXRMIdx >= 0 && !RISCVInstrInfo::ignoresVXRM(MI)) {
       unsigned NewVXRMImm = MI.getOperand(VXRMIdx).getImm();
 
       if (PendingInsert || !Info.isStatic() ||
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index c4a2784263af0..c685390c4567a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -84,6 +84,9 @@ RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
     : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
       STI(STI) {}
 
+#define GET_INSTRINFO_HELPERS
+#include "RISCVGenInstrInfo.inc"
+
 MCInst RISCVInstrInfo::getNop() const {
   if (STI.hasStdExtCOrZca())
     return MCInstBuilder(RISCV::C_NOP);
@@ -835,11 +838,11 @@ std::optional<unsigned> getFoldedOpcode(MachineFunction &MF, MachineInstr &MI,
 
   switch (MI.getOpcode()) {
   default:
-    if (RISCV::isSEXT_W(MI))
+    if (RISCVInstrInfo::isSEXT_W(MI))
       return RISCV::LW;
-    if (RISCV::isZEXT_W(MI))
+    if (RISCVInstrInfo::isZEXT_W(MI))
       return RISCV::LWU;
-    if (RISCV::isZEXT_B(MI))
+    if (RISCVInstrInfo::isZEXT_B(MI))
       return RISCV::LBU;
     break;
   case RISCV::SEXT_H:
@@ -4169,24 +4172,6 @@ unsigned RISCVInstrInfo::getTailDuplicateSize(CodeGenOptLevel OptLevel) const {
              : 2;
 }
 
-// Returns true if this is the sext.w pattern, addiw rd, rs1, 0.
-bool RISCV::isSEXT_W(const MachineInstr &MI) {
-  return MI.getOpcode() == RISCV::ADDIW && MI.getOperand(1).isReg() &&
-         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
-}
-
-// Returns true if this is the zext.w pattern, adduw rd, rs1, x0.
-bool RISCV::isZEXT_W(const MachineInstr &MI) {
-  return MI.getOpcode() == RISCV::ADD_UW && MI.getOperand(1).isReg() &&
-         MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0;
-}
-
-// Returns true if this is the zext.b pattern, andi rd, rs1, 255.
-bool RISCV::isZEXT_B(const MachineInstr &MI) {
-  return MI.getOpcode() == RISCV::ANDI && MI.getOperand(1).isReg() &&
-         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 255;
-}
-
 bool RISCV::isRVVSpill(const MachineInstr &MI) {
   // RVV lacks any support for immediate addressing for stack addresses, so be
   // conservative.
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 67e457d64f6e3..e4d43b12d65d4 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -308,6 +308,8 @@ class RISCVInstrInfo : public RISCVGenInstrInfo {
 
   static bool isLdStSafeToPair(const MachineInstr &LdSt,
                                const TargetRegisterInfo *TRI);
+#define GET_INSTRINFO_HELPER_DECLS
+#include "RISCVGenInstrInfo.inc"
 
   /// Return the result of the evaluation of C0 CC C1, where CC is a
   /// RISCVCC::CondCode.
@@ -334,11 +336,6 @@ class RISCVInstrInfo : public RISCVGenInstrInfo {
 
 namespace RISCV {
 
-// Returns true if this is the sext.w pattern, addiw rd, rs1, 0.
-bool isSEXT_W(const MachineInstr &MI);
-bool isZEXT_W(const MachineInstr &MI);
-bool isZEXT_B(const MachineInstr &MI);
-
 // Returns true if the given MI is an RVV instruction opcode for which we may
 // expect to see a FrameIndex operand.
 bool isRVVSpill(const MachineInstr &MI);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrPredicates.td b/llvm/lib/Target/RISCV/RISCVInstrPredicates.td
new file mode 100644
index 0000000000000..67302d08c9743
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVInstrPredicates.td
@@ -0,0 +1,143 @@
+//===-- RISCVInstrPredicates.td - Instruction Predicates ---*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the RISC-V instruction predicates.
+//
+//===----------------------------------------------------------------------===//
+
+// Returns true if this is the sext.w pattern, addiw rd, rs1, 0.
+def isSEXT_W
+    : TIIPredicate<"isSEXT_W",
+                   MCReturnStatement<CheckAll<[
+                     CheckOpcode<[ADDIW]>,
+                     CheckIsRegOperand<1>,
+                     CheckIsImmOperand<2>,
+                     CheckImmOperand<2, 0>
+                   ]>>>;
+
+// Returns true if this is the zext.w pattern, adduw rd, rs1, x0.
+def isZEXT_W
+    : TIIPredicate<"isZEXT_W",
+                   MCReturnStatement<CheckAll<[
+                     CheckOpcode<[ADD_UW]>,
+                     CheckIsRegOperand<1>,
+                     CheckIsRegOperand<2>,
+                     CheckRegOperand<2, X0>
+                   ]>>>;
+
+// Returns true if this is the zext.b pattern, andi rd, rs1, 255.
+def isZEXT_B
+    : TIIPredicate<"isZEXT_B",
+                   MCReturnStatement<CheckAll<[
+                     CheckOpcode<[ANDI]>,
+                     CheckIsRegOperand<1>,
+                     CheckIsImmOperand<2>,
+                     CheckImmOperand<2, 255>
+                   ]>>>;
+
+// Returns true if this is one of the select pseudo instructions.
+def isSelectPseudo
+    : TIIPredicate<"isSelectPseudo",
+                   MCOpcodeSwitchStatement<
+                     [MCOpcodeSwitchCase<
+                        [Select_GPR_Using_CC_GPR,
+                         Select_GPR_Using_CC_Imm,
+                         Select_FPR16_Using_CC_GPR,
+                         Select_FPR16INX_Using_CC_GPR,
+                         Select_FPR32_Using_CC_GPR,
+                         Select_FPR32INX_Using_CC_GPR,
+                         Select_FPR64_Using_CC_GPR,
+                         Select_FPR64INX_Using_CC_GPR,
+                         Select_FPR64IN32X_Using_CC_GPR
+                        ],
+                        MCReturnStatement<TruePred>>],
+                      MCReturnStatement<FalsePred>>>;
+
+// Returns true if this is a vector configuration instruction.
+def isVectorConfigInstr
+    : TIIPredicate<"isVectorConfigInstr",
+                   MCReturnStatement<
+                     CheckOpcode<[
+                       PseudoVSETVLI,
+                       PseudoVSETVLIX0,
+                       PseudoVSETIVLI
+                     ]>>>;
+
+// Return true if this is 'vsetvli x0, x0, vtype' which preserves
+// VL and only sets VTYPE.
+def isVLPreservingConfig
+    : TIIPredicate<"isVLPreservingConfig",
+                   MCReturnStatement<
+                     CheckAll<[
+                       CheckOpcode<[PseudoVSETVLIX0]>,
+                       CheckRegOperand<0, X0>
+                     ]>>>;
+
+def isFloatScalarMoveOrScalarSplatInstr
+    : TIIPredicate<"isFloatScalarMoveOrScalarSplatInstr",
+                   MCReturnStatement<
+                     CheckOpcode<!listflatten([
+                      !instances<Instruction>("PseudoVFMV_S_F.*"),
+                      !instances<Instruction>("PseudoVFMV_V_F.*")
+                     ])>>>;
+
+def isScalarExtractInstr
+    : TIIPredicate<"isScalarExtractInstr",
+                   MCReturnStatement<
+                     CheckOpcode<!listflatten([
+                      !instances<Instruction>("PseudoVMV_X_S.*"),
+                      !instances<Instruction>("PseudoVFMV_F.*_S.*")
+                     ])>>>;
+
+def isScalarInsertInstr
+    : TIIPredicate<"isScalarInsertInstr",
+                   MCReturnStatement<
+                     CheckOpcode<!listflatten([
+                      !instances<Instruction>("PseudoVMV_S_X.*"),
+                      !instances<Instruction>("PseudoVFMV_S_F.*")
+                     ])>>>;
+
+def isScalarSplatInstr
+    : TIIPredicate<"isScalarSplatInstr",
+                   MCReturnStatement<
+                     CheckOpcode<!listflatten([
+                      !instances<Instruction>("PseudoVMV_V_I.*"),
+                      !instances<Instruction>("PseudoVMV_V_X.*"),
+                      !instances<Instruction>("PseudoVFMV_V_F.*")
+                     ])>>>;
+
+def isVSlideInstr
+    : TIIPredicate<"isVSlideInstr",
+                   MCReturnStatement<
+                     CheckOpcode<!listflatten([
+                      !instances<Instruction>("PseudoVSLIDEDOWN_VX.*"),
+                      !instances<Instruction>("PseudoVSLIDEDOWN_VI.*"),
+                      !instances<Instruction>("PseudoVSLIDEUP_VX.*"),
+                      !instances<Instruction>("PseudoVSLIDEUP_VI.*")
+                     ])>>>;
+
+def isNonZeroLoadImmediate
+    : TIIPredicate<"isNonZeroLoadImmediate",
+                   MCReturnStatement<CheckAll<[
+                     CheckOpcode<[ADDI]>,
+                     CheckIsRegOperand<1>,
+                     CheckRegOperand<1, X0>,
+                     CheckIsImmOperand<2>,
+                     CheckNot<CheckImmOperand<2, 0>>
+                   ]>>>;
+
+def ignoresVXRM
+    : TIIPredicate<"ignoresVXRM",
+                   MCOpcodeSwitchStatement<
+                     [MCOpcodeSwitchCase<
+                        !listflatten([
+                          !instances<Instruction>("PseudoVNCLIP_WI.*"),
+                          !instances<Instruction>("PseudoVNCLIPU_WI.*")
+                        ]),
+                        MCReturnStatement<CheckImmOperand<3, 0>>>],
+                      MCReturnStatement<FalsePred>>>;
diff --git a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
index 28bee83837654..3b601bb43bf28 100644
--- a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
+++ b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
@@ -648,7 +648,7 @@ bool RISCVOptWInstrs::removeSExtWInstrs(MachineFunction &MF,
   for (MachineBasicBlock &MBB : MF) {
     for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
       // We're looking for the sext.w pattern ADDIW rd, rs1, 0.
-      if (!RISCV::isSEXT_W(MI))
+      if (!RISCVInstrInfo::isSEXT_W(MI))
         continue;
 
       Register SrcReg = MI.getOperand(1).getReg();

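On the macro fusion use mentioned in the commit message: once these
helpers exist on RISCVInstrInfo, target C++ can reuse them directly. A
minimal sketch, assuming only the generated MachineInstr-based
signatures; the fusion rule itself is hypothetical and not part of this
patch:

    // Hypothetical fusion-candidate check reusing the generated helper.
    // isZEXT_W recognizes the zext.w idiom (add.uw rd, rs1, x0) and
    // already verifies that operand 1 of Second is a register.
    static bool isZExtWFusionPair(const MachineInstr &First,
                                  const MachineInstr &Second) {
      return RISCVInstrInfo::isZEXT_W(Second) &&
             First.getOperand(0).isReg() &&
             Second.getOperand(1).getReg() == First.getOperand(0).getReg();
    }
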
From dd1a484a987a92b9dc31339a3269154937e0932d Mon Sep 17 00:00:00 2001
From: Pengcheng Wang <wangpengcheng.pp at bytedance.com>
Date: Tue, 22 Apr 2025 21:12:38 +0800
Subject: [PATCH 2/2] Use Pseudo instead of Instruction to narrow the scope

!instances<Pseudo> matches only records derived from Pseudo, so the
regexes cannot accidentally pick up non-pseudo instruction records whose
names happen to fit the patterns.
---
 llvm/lib/Target/RISCV/RISCVInstrPredicates.td | 30 +++++++++----------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrPredicates.td b/llvm/lib/Target/RISCV/RISCVInstrPredicates.td
index 67302d08c9743..8d1d3da499575 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrPredicates.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrPredicates.td
@@ -82,43 +82,43 @@ def isFloatScalarMoveOrScalarSplatInstr
     : TIIPredicate<"isFloatScalarMoveOrScalarSplatInstr",
                    MCReturnStatement<
                      CheckOpcode<!listflatten([
-                      !instances<Instruction>("PseudoVFMV_S_F.*"),
-                      !instances<Instruction>("PseudoVFMV_V_F.*")
+                      !instances<Pseudo>("PseudoVFMV_S_F.*"),
+                      !instances<Pseudo>("PseudoVFMV_V_F.*")
                      ])>>>;
 
 def isScalarExtractInstr
     : TIIPredicate<"isScalarExtractInstr",
                    MCReturnStatement<
                      CheckOpcode<!listflatten([
-                      !instances<Instruction>("PseudoVMV_X_S.*"),
-                      !instances<Instruction>("PseudoVFMV_F.*_S.*")
+                      !instances<Pseudo>("PseudoVMV_X_S.*"),
+                      !instances<Pseudo>("PseudoVFMV_F.*_S.*")
                      ])>>>;
 
 def isScalarInsertInstr
     : TIIPredicate<"isScalarInsertInstr",
                    MCReturnStatement<
                      CheckOpcode<!listflatten([
-                      !instances<Instruction>("PseudoVMV_S_X.*"),
-                      !instances<Instruction>("PseudoVFMV_S_F.*")
+                      !instances<Pseudo>("PseudoVMV_S_X.*"),
+                      !instances<Pseudo>("PseudoVFMV_S_F.*")
                      ])>>>;
 
 def isScalarSplatInstr
     : TIIPredicate<"isScalarSplatInstr",
                    MCReturnStatement<
                      CheckOpcode<!listflatten([
-                      !instances<Instruction>("PseudoVMV_V_I.*"),
-                      !instances<Instruction>("PseudoVMV_V_X.*"),
-                      !instances<Instruction>("PseudoVFMV_V_F.*")
+                      !instances<Pseudo>("PseudoVMV_V_I.*"),
+                      !instances<Pseudo>("PseudoVMV_V_X.*"),
+                      !instances<Pseudo>("PseudoVFMV_V_F.*")
                      ])>>>;
 
 def isVSlideInstr
     : TIIPredicate<"isVSlideInstr",
                    MCReturnStatement<
                      CheckOpcode<!listflatten([
-                      !instances<Instruction>("PseudoVSLIDEDOWN_VX.*"),
-                      !instances<Instruction>("PseudoVSLIDEDOWN_VI.*"),
-                      !instances<Instruction>("PseudoVSLIDEUP_VX.*"),
-                      !instances<Instruction>("PseudoVSLIDEUP_VI.*")
+                      !instances<Pseudo>("PseudoVSLIDEDOWN_VX.*"),
+                      !instances<Pseudo>("PseudoVSLIDEDOWN_VI.*"),
+                      !instances<Pseudo>("PseudoVSLIDEUP_VX.*"),
+                      !instances<Pseudo>("PseudoVSLIDEUP_VI.*")
                      ])>>>;
 
 def isNonZeroLoadImmediate
@@ -136,8 +136,8 @@ def ignoresVXRM
                    MCOpcodeSwitchStatement<
                      [MCOpcodeSwitchCase<
                         !listflatten([
-                          !instances<Instruction>("PseudoVNCLIP_WI.*"),
-                          !instances<Instruction>("PseudoVNCLIPU_WI.*")
+                          !instances<Pseudo>("PseudoVNCLIP_WI.*"),
+                          !instances<Pseudo>("PseudoVNCLIPU_WI.*")
                         ]),
                         MCReturnStatement<CheckImmOperand<3, 0>>>],
                       MCReturnStatement<FalsePred>>>;


