[llvm] e179b12 - [RISCV][NFC] Pass MCSubtargetInfo instead of FeatureBitset in RISCVMatInt (#71770)

via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 8 23:15:28 PST 2023


Author: Wang Pengcheng
Date: 2023-11-09T15:15:23+08:00
New Revision: e179b125fb019dae54aecbb5687f77c6dd67a17d

URL: https://github.com/llvm/llvm-project/commit/e179b125fb019dae54aecbb5687f77c6dd67a17d
DIFF: https://github.com/llvm/llvm-project/commit/e179b125fb019dae54aecbb5687f77c6dd67a17d.diff

LOG: [RISCV][NFC] Pass MCSubtargetInfo instead of FeatureBitset in RISCVMatInt (#71770)

The use of `hasFeature` is more descriptive, and callers of the
`RISCVMatInt` helpers no longer need to call `getFeatureBits()` themselves.
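
For illustration, a typical call site changes as follows (a minimal sketch
based on the diff below; `Val` and `STI` stand in for whichever immediate and
subtarget info a given caller already has):

    // Before: callers had to extract the feature bits themselves.
    RISCVMatInt::InstSeq Seq =
        RISCVMatInt::generateInstSeq(Val, STI.getFeatureBits());

    // After: the MCSubtargetInfo is passed directly; RISCVMatInt queries it
    // internally via STI.hasFeature(...).
    RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val, STI);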

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
    llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
    llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
    llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
    llvm/lib/Target/RISCV/RISCVPostRAExpandPseudoInsts.cpp
    llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index eb861cee674f3f9..f6256aa57304bda 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -2977,8 +2977,7 @@ void RISCVAsmParser::emitToStreamer(MCStreamer &S, const MCInst &Inst) {
 
 void RISCVAsmParser::emitLoadImm(MCRegister DestReg, int64_t Value,
                                  MCStreamer &Out) {
-  RISCVMatInt::InstSeq Seq =
-      RISCVMatInt::generateInstSeq(Value, getSTI().getFeatureBits());
+  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Value, getSTI());
 
   MCRegister SrcReg = RISCV::X0;
   for (const RISCVMatInt::Inst &Inst : Seq) {

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 6b5a96dbfc49fff..f9a8b4970845ba3 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -538,8 +538,7 @@ bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
     return true;
   }
 
-  RISCVMatInt::InstSeq Seq =
-      RISCVMatInt::generateInstSeq(Imm, Subtarget->getFeatureBits());
+  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
   unsigned NumInsts = Seq.size();
   Register SrcReg = RISCV::X0;
 

diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
index 403bd727684f684..4358a5b878e6316 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
@@ -45,13 +45,12 @@ static int getInstSeqCost(RISCVMatInt::InstSeq &Res, bool HasRVC) {
 }
 
 // Recursively generate a sequence for materializing an integer.
-static void generateInstSeqImpl(int64_t Val,
-                                const FeatureBitset &ActiveFeatures,
+static void generateInstSeqImpl(int64_t Val, const MCSubtargetInfo &STI,
                                 RISCVMatInt::InstSeq &Res) {
-  bool IsRV64 = ActiveFeatures[RISCV::Feature64Bit];
+  bool IsRV64 = STI.hasFeature(RISCV::Feature64Bit);
 
   // Use BSETI for a single bit that can't be expressed by a single LUI or ADDI.
-  if (ActiveFeatures[RISCV::FeatureStdExtZbs] && isPowerOf2_64(Val) &&
+  if (STI.hasFeature(RISCV::FeatureStdExtZbs) && isPowerOf2_64(Val) &&
       (!isInt<32>(Val) || Val == 0x800)) {
     Res.emplace_back(RISCV::BSETI, Log2_64(Val));
     return;
@@ -122,7 +121,7 @@ static void generateInstSeqImpl(int64_t Val,
         ShiftAmount -= 12;
         Val = (uint64_t)Val << 12;
       } else if (isUInt<32>((uint64_t)Val << 12) &&
-                 ActiveFeatures[RISCV::FeatureStdExtZba]) {
+                 STI.hasFeature(RISCV::FeatureStdExtZba)) {
         // Reduce the shift amount and add zeros to the LSBs so it will match
         // LUI, then shift left with SLLI.UW to clear the upper 32 set bits.
         ShiftAmount -= 12;
@@ -133,7 +132,7 @@ static void generateInstSeqImpl(int64_t Val,
 
     // Try to use SLLI_UW for Val when it is uint32 but not int32.
     if (isUInt<32>((uint64_t)Val) && !isInt<32>((uint64_t)Val) &&
-        ActiveFeatures[RISCV::FeatureStdExtZba]) {
+        STI.hasFeature(RISCV::FeatureStdExtZba)) {
       // Use LUI+ADDI or LUI to compose, then clear the upper 32 bits with
       // SLLI_UW.
       Val = ((uint64_t)Val) | (0xffffffffull << 32);
@@ -141,7 +140,7 @@ static void generateInstSeqImpl(int64_t Val,
     }
   }
 
-  generateInstSeqImpl(Val, ActiveFeatures, Res);
+  generateInstSeqImpl(Val, STI, Res);
 
   // Skip shift if we were able to use LUI directly.
   if (ShiftAmount) {
@@ -171,8 +170,7 @@ static unsigned extractRotateInfo(int64_t Val) {
   return 0;
 }
 
-static void generateInstSeqLeadingZeros(int64_t Val,
-                                        const FeatureBitset &ActiveFeatures,
+static void generateInstSeqLeadingZeros(int64_t Val, const MCSubtargetInfo &STI,
                                         RISCVMatInt::InstSeq &Res) {
   assert(Val > 0 && "Expected postive val");
 
@@ -184,7 +182,7 @@ static void generateInstSeqLeadingZeros(int64_t Val,
   ShiftedVal |= maskTrailingOnes<uint64_t>(LeadingZeros);
 
   RISCVMatInt::InstSeq TmpSeq;
-  generateInstSeqImpl(ShiftedVal, ActiveFeatures, TmpSeq);
+  generateInstSeqImpl(ShiftedVal, STI, TmpSeq);
 
   // Keep the new sequence if it is an improvement or the original is empty.
   if ((TmpSeq.size() + 1) < Res.size() ||
@@ -196,7 +194,7 @@ static void generateInstSeqLeadingZeros(int64_t Val,
   // Some cases can benefit from filling the lower bits with zeros instead.
   ShiftedVal &= maskTrailingZeros<uint64_t>(LeadingZeros);
   TmpSeq.clear();
-  generateInstSeqImpl(ShiftedVal, ActiveFeatures, TmpSeq);
+  generateInstSeqImpl(ShiftedVal, STI, TmpSeq);
 
   // Keep the new sequence if it is an improvement or the original is empty.
   if ((TmpSeq.size() + 1) < Res.size() ||
@@ -207,11 +205,11 @@ static void generateInstSeqLeadingZeros(int64_t Val,
 
   // If we have exactly 32 leading zeros and Zba, we can try using zext.w at
   // the end of the sequence.
-  if (LeadingZeros == 32 && ActiveFeatures[RISCV::FeatureStdExtZba]) {
+  if (LeadingZeros == 32 && STI.hasFeature(RISCV::FeatureStdExtZba)) {
     // Try replacing upper bits with 1.
     uint64_t LeadingOnesVal = Val | maskLeadingOnes<uint64_t>(LeadingZeros);
     TmpSeq.clear();
-    generateInstSeqImpl(LeadingOnesVal, ActiveFeatures, TmpSeq);
+    generateInstSeqImpl(LeadingOnesVal, STI, TmpSeq);
 
     // Keep the new sequence if it is an improvement.
     if ((TmpSeq.size() + 1) < Res.size() ||
@@ -223,9 +221,9 @@ static void generateInstSeqLeadingZeros(int64_t Val,
 }
 
 namespace llvm::RISCVMatInt {
-InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
+InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI) {
   RISCVMatInt::InstSeq Res;
-  generateInstSeqImpl(Val, ActiveFeatures, Res);
+  generateInstSeqImpl(Val, STI, Res);
 
   // If the low 12 bits are non-zero, the first expansion may end with an ADDI
   // or ADDIW. If there are trailing zeros, try generating a sign extended
@@ -238,9 +236,9 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
     // NOTE: We don't check for C extension to minimize differences in generated
     // code.
     bool IsShiftedCompressible =
-              isInt<6>(ShiftedVal) && !ActiveFeatures[RISCV::TuneLUIADDIFusion];
+        isInt<6>(ShiftedVal) && !STI.hasFeature(RISCV::TuneLUIADDIFusion);
     RISCVMatInt::InstSeq TmpSeq;
-    generateInstSeqImpl(ShiftedVal, ActiveFeatures, TmpSeq);
+    generateInstSeqImpl(ShiftedVal, STI, TmpSeq);
 
     // Keep the new sequence if it is an improvement.
     if ((TmpSeq.size() + 1) < Res.size() || IsShiftedCompressible) {
@@ -254,7 +252,7 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
   if (Res.size() <= 2)
     return Res;
 
-  assert(ActiveFeatures[RISCV::Feature64Bit] &&
+  assert(STI.hasFeature(RISCV::Feature64Bit) &&
          "Expected RV32 to only need 2 instructions");
 
   // If the lower 13 bits are something like 0x17ff, try to add 1 to change the
@@ -266,7 +264,7 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
     int64_t Imm12 = -(0x800 - (Val & 0xfff));
     int64_t AdjustedVal = Val - Imm12;
     RISCVMatInt::InstSeq TmpSeq;
-    generateInstSeqImpl(AdjustedVal, ActiveFeatures, TmpSeq);
+    generateInstSeqImpl(AdjustedVal, STI, TmpSeq);
 
     // Keep the new sequence if it is an improvement.
     if ((TmpSeq.size() + 1) < Res.size()) {
@@ -278,7 +276,7 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
   // If the constant is positive we might be able to generate a shifted constant
   // with no leading zeros and use a final SRLI to restore them.
   if (Val > 0 && Res.size() > 2) {
-    generateInstSeqLeadingZeros(Val, ActiveFeatures, Res);
+    generateInstSeqLeadingZeros(Val, STI, Res);
   }
 
   // If the constant is negative, trying inverting and using our trailing zero
@@ -286,7 +284,7 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
   if (Val < 0 && Res.size() > 3) {
     uint64_t InvertedVal = ~(uint64_t)Val;
     RISCVMatInt::InstSeq TmpSeq;
-    generateInstSeqLeadingZeros(InvertedVal, ActiveFeatures, TmpSeq);
+    generateInstSeqLeadingZeros(InvertedVal, STI, TmpSeq);
 
     // Keep it if we found a sequence that is smaller after inverting.
     if (!TmpSeq.empty() && (TmpSeq.size() + 1) < Res.size()) {
@@ -298,12 +296,12 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
   // If the Low and High halves are the same, use pack. The pack instruction
   // packs the XLEN/2-bit lower halves of rs1 and rs2 into rd, with rs1 in the
   // lower half and rs2 in the upper half.
-  if (Res.size() > 2 && ActiveFeatures[RISCV::FeatureStdExtZbkb]) {
+  if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZbkb)) {
     int64_t LoVal = SignExtend64<32>(Val);
     int64_t HiVal = SignExtend64<32>(Val >> 32);
     if (LoVal == HiVal) {
       RISCVMatInt::InstSeq TmpSeq;
-      generateInstSeqImpl(LoVal, ActiveFeatures, TmpSeq);
+      generateInstSeqImpl(LoVal, STI, TmpSeq);
       if ((TmpSeq.size() + 1) < Res.size()) {
         TmpSeq.emplace_back(RISCV::PACK, 0);
         Res = TmpSeq;
@@ -312,7 +310,7 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
   }
 
   // Perform optimization with BCLRI/BSETI in the Zbs extension.
-  if (Res.size() > 2 && ActiveFeatures[RISCV::FeatureStdExtZbs]) {
+  if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZbs)) {
     // 1. For values in range 0xffffffff 7fffffff ~ 0xffffffff 00000000,
     //    call generateInstSeqImpl with Val|0x80000000 (which is expected be
     //    an int32), then emit (BCLRI r, 31).
@@ -330,7 +328,7 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
     }
     if (isInt<32>(NewVal)) {
       RISCVMatInt::InstSeq TmpSeq;
-      generateInstSeqImpl(NewVal, ActiveFeatures, TmpSeq);
+      generateInstSeqImpl(NewVal, STI, TmpSeq);
       if ((TmpSeq.size() + 1) < Res.size()) {
         TmpSeq.emplace_back(Opc, 31);
         Res = TmpSeq;
@@ -344,7 +342,7 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
     uint32_t Hi = Hi_32(Val);
     Opc = 0;
     RISCVMatInt::InstSeq TmpSeq;
-    generateInstSeqImpl(Lo, ActiveFeatures, TmpSeq);
+    generateInstSeqImpl(Lo, STI, TmpSeq);
     // Check if it is profitable to use BCLRI/BSETI.
     if (Lo > 0 && TmpSeq.size() + llvm::popcount(Hi) < Res.size()) {
       Opc = RISCV::BSETI;
@@ -365,7 +363,7 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
   }
 
   // Perform optimization with SH*ADD in the Zba extension.
-  if (Res.size() > 2 && ActiveFeatures[RISCV::FeatureStdExtZba]) {
+  if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZba)) {
     int64_t Div = 0;
     unsigned Opc = 0;
     RISCVMatInt::InstSeq TmpSeq;
@@ -382,7 +380,7 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
     }
     // Build the new instruction sequence.
     if (Div > 0) {
-      generateInstSeqImpl(Val / Div, ActiveFeatures, TmpSeq);
+      generateInstSeqImpl(Val / Div, STI, TmpSeq);
       if ((TmpSeq.size() + 1) < Res.size()) {
         TmpSeq.emplace_back(Opc, 0);
         Res = TmpSeq;
@@ -409,7 +407,7 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
         assert(Lo12 != 0 &&
                "unexpected instruction sequence for immediate materialisation");
         assert(TmpSeq.empty() && "Expected empty TmpSeq");
-        generateInstSeqImpl(Hi52 / Div, ActiveFeatures, TmpSeq);
+        generateInstSeqImpl(Hi52 / Div, STI, TmpSeq);
         if ((TmpSeq.size() + 2) < Res.size()) {
           TmpSeq.emplace_back(Opc, 0);
           TmpSeq.emplace_back(RISCV::ADDI, Lo12);
@@ -421,14 +419,14 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
 
   // Perform optimization with rori in the Zbb and th.srri in the XTheadBb
   // extension.
-  if (Res.size() > 2 && (ActiveFeatures[RISCV::FeatureStdExtZbb] ||
-                         ActiveFeatures[RISCV::FeatureVendorXTHeadBb])) {
+  if (Res.size() > 2 && (STI.hasFeature(RISCV::FeatureStdExtZbb) ||
+                         STI.hasFeature(RISCV::FeatureVendorXTHeadBb))) {
     if (unsigned Rotate = extractRotateInfo(Val)) {
       RISCVMatInt::InstSeq TmpSeq;
       uint64_t NegImm12 = llvm::rotl<uint64_t>(Val, Rotate);
       assert(isInt<12>(NegImm12));
       TmpSeq.emplace_back(RISCV::ADDI, NegImm12);
-      TmpSeq.emplace_back(ActiveFeatures[RISCV::FeatureStdExtZbb]
+      TmpSeq.emplace_back(STI.hasFeature(RISCV::FeatureStdExtZbb)
                               ? RISCV::RORI
                               : RISCV::TH_SRRI,
                           Rotate);
@@ -438,7 +436,7 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
   return Res;
 }
 
-InstSeq generateTwoRegInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures,
+InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI,
                               unsigned &ShiftAmt, unsigned &AddOpc) {
   int64_t LoVal = SignExtend64<32>(Val);
   if (LoVal == 0)
@@ -459,23 +457,23 @@ InstSeq generateTwoRegInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures,
   AddOpc = RISCV::ADD;
 
   if (Tmp == ((uint64_t)LoVal << ShiftAmt))
-    return RISCVMatInt::generateInstSeq(LoVal, ActiveFeatures);
+    return RISCVMatInt::generateInstSeq(LoVal, STI);
 
   // If we have Zba, we can use (ADD_UW X, (SLLI X, 32)).
-  if (ActiveFeatures[RISCV::FeatureStdExtZba] && Lo_32(Val) == Hi_32(Val)) {
+  if (STI.hasFeature(RISCV::FeatureStdExtZba) && Lo_32(Val) == Hi_32(Val)) {
     ShiftAmt = 32;
     AddOpc = RISCV::ADD_UW;
-    return RISCVMatInt::generateInstSeq(LoVal, ActiveFeatures);
+    return RISCVMatInt::generateInstSeq(LoVal, STI);
   }
 
   return RISCVMatInt::InstSeq();
 }
 
-int getIntMatCost(const APInt &Val, unsigned Size,
-                  const FeatureBitset &ActiveFeatures, bool CompressionCost) {
-  bool IsRV64 = ActiveFeatures[RISCV::Feature64Bit];
-  bool HasRVC = CompressionCost && (ActiveFeatures[RISCV::FeatureStdExtC] ||
-                                    ActiveFeatures[RISCV::FeatureStdExtZca]);
+int getIntMatCost(const APInt &Val, unsigned Size, const MCSubtargetInfo &STI,
+                  bool CompressionCost) {
+  bool IsRV64 = STI.hasFeature(RISCV::Feature64Bit);
+  bool HasRVC = CompressionCost && (STI.hasFeature(RISCV::FeatureStdExtC) ||
+                                    STI.hasFeature(RISCV::FeatureStdExtZca));
   int PlatRegSize = IsRV64 ? 64 : 32;
 
   // Split the constant into platform register sized chunks, and calculate cost
@@ -483,7 +481,7 @@ int getIntMatCost(const APInt &Val, unsigned Size,
   int Cost = 0;
   for (unsigned ShiftVal = 0; ShiftVal < Size; ShiftVal += PlatRegSize) {
     APInt Chunk = Val.ashr(ShiftVal).sextOrTrunc(PlatRegSize);
-    InstSeq MatSeq = generateInstSeq(Chunk.getSExtValue(), ActiveFeatures);
+    InstSeq MatSeq = generateInstSeq(Chunk.getSExtValue(), STI);
     Cost += getInstSeqCost(MatSeq, HasRVC);
   }
   return std::max(1, Cost);

diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h
index 072b30f2a06484a..780f685463f3004 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h
@@ -10,7 +10,7 @@
 #define LLVM_LIB_TARGET_RISCV_MCTARGETDESC_MATINT_H
 
 #include "llvm/ADT/SmallVector.h"
-#include "llvm/TargetParser/SubtargetFeature.h"
+#include "llvm/MC/MCSubtargetInfo.h"
 #include <cstdint>
 
 namespace llvm {
@@ -46,14 +46,14 @@ using InstSeq = SmallVector<Inst, 8>;
 // simple struct is produced rather than directly emitting the instructions in
 // order to allow this helper to be used from both the MC layer and during
 // instruction selection.
-InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures);
+InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI);
 
 // Helper to generate an instruction sequence that can materialize the given
 // immediate value into a register using an additional temporary register. This
 // handles cases where the constant can be generated by (ADD (SLLI X, C), X) or
 // (ADD_UW (SLLI X, C) X). The sequence to generate X is returned. ShiftAmt is
 // provides the SLLI and AddOpc indicates ADD or ADD_UW.
-InstSeq generateTwoRegInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures,
+InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI,
                               unsigned &ShiftAmt, unsigned &AddOpc);
 
 // Helper to estimate the number of instructions required to materialise the
@@ -66,8 +66,7 @@ InstSeq generateTwoRegInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures,
 // If CompressionCost is true it will use a different cost calculation if RVC is
 // enabled. This should be used to compare two different sequences to determine
 // which is more compressible.
-int getIntMatCost(const APInt &Val, unsigned Size,
-                  const FeatureBitset &ActiveFeatures,
+int getIntMatCost(const APInt &Val, unsigned Size, const MCSubtargetInfo &STI,
                   bool CompressionCost = false);
 } // namespace RISCVMatInt
 } // namespace llvm

diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 920657a198d9b6b..1266c370cddeb5e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -201,8 +201,7 @@ static SDValue selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
 
 static SDValue selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
                          int64_t Imm, const RISCVSubtarget &Subtarget) {
-  RISCVMatInt::InstSeq Seq =
-      RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
+  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, Subtarget);
 
   // Use a rematerializable pseudo instruction for short sequences if enabled.
   if (Seq.size() == 2 && UsePseudoMovImm)
@@ -218,8 +217,8 @@ static SDValue selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
   // low and high 32 bits are the same and bit 31 and 63 are set.
   if (Seq.size() > 3) {
     unsigned ShiftAmt, AddOpc;
-    RISCVMatInt::InstSeq SeqLo = RISCVMatInt::generateTwoRegInstSeq(
-        Imm, Subtarget.getFeatureBits(), ShiftAmt, AddOpc);
+    RISCVMatInt::InstSeq SeqLo =
+        RISCVMatInt::generateTwoRegInstSeq(Imm, Subtarget, ShiftAmt, AddOpc);
     if (!SeqLo.empty() && (SeqLo.size() + 2) < Seq.size()) {
       SDValue Lo = selectImmSeq(CurDAG, DL, VT, SeqLo);
 
@@ -2283,8 +2282,7 @@ static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL,
   }
 
   // Ask how constant materialization would handle this constant.
-  RISCVMatInt::InstSeq Seq =
-      RISCVMatInt::generateInstSeq(CVal, Subtarget->getFeatureBits());
+  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(CVal, *Subtarget);
 
   // If the last instruction would be an ADDI, we can fold its immediate and
   // emit the rest of the sequence as the base.

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 392ceeb537b692d..023a70d56294ad6 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1879,8 +1879,7 @@ bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
   // TODO: Should we keep the load only when we're definitely going to emit a
   // constant pool?
 
-  RISCVMatInt::InstSeq Seq =
-      RISCVMatInt::generateInstSeq(Val, Subtarget.getFeatureBits());
+  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val, Subtarget);
   return Seq.size() <= Subtarget.getMaxBuildIntsCost();
 }
 
@@ -2118,8 +2117,8 @@ bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
   // Building an integer and then converting requires a fmv at the end of
   // the integer sequence.
   const int Cost =
-    1 + RISCVMatInt::getIntMatCost(Imm.bitcastToAPInt(), Subtarget.getXLen(),
-                                   Subtarget.getFeatureBits());
+      1 + RISCVMatInt::getIntMatCost(Imm.bitcastToAPInt(), Subtarget.getXLen(),
+                                     Subtarget);
   return Cost <= FPImmCost;
 }
 
@@ -5119,8 +5118,7 @@ static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG,
   if (!Subtarget.useConstantPoolForLargeInts())
     return Op;
 
-  RISCVMatInt::InstSeq Seq =
-      RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
+  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, Subtarget);
   if (Seq.size() <= Subtarget.getMaxBuildIntsCost())
     return Op;
 
@@ -5135,8 +5133,8 @@ static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG,
   // If we have Zba we can use (ADD_UW X, (SLLI X, 32)) to handle cases where
   // low and high 32 bits are the same and bit 31 and 63 are set.
   unsigned ShiftAmt, AddOpc;
-  RISCVMatInt::InstSeq SeqLo = RISCVMatInt::generateTwoRegInstSeq(
-      Imm, Subtarget.getFeatureBits(), ShiftAmt, AddOpc);
+  RISCVMatInt::InstSeq SeqLo =
+      RISCVMatInt::generateTwoRegInstSeq(Imm, Subtarget, ShiftAmt, AddOpc);
   if (!SeqLo.empty() && (SeqLo.size() + 2) <= Subtarget.getMaxBuildIntsCost())
     return Op;
 
@@ -15134,8 +15132,8 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
       }
       MVT NewVT = MVT::getIntegerVT(MemVT.getSizeInBits());
 
-      if (RISCVMatInt::getIntMatCost(NewC, Subtarget.getXLen(),
-                                     Subtarget.getFeatureBits(), true) <= 2 &&
+      if (RISCVMatInt::getIntMatCost(NewC, Subtarget.getXLen(), Subtarget,
+                                     true) <= 2 &&
           allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                          NewVT, *Store->getMemOperand())) {
         SDValue NewV = DAG.getConstant(NewC, DL, NewVT);
@@ -15440,12 +15438,12 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
 
       // Neither constant will fit into an immediate, so find materialisation
       // costs.
-      int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
-                                              Subtarget.getFeatureBits(),
-                                              /*CompressionCost*/true);
+      int C1Cost =
+          RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(), Subtarget,
+                                     /*CompressionCost*/ true);
       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
-          ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
-          /*CompressionCost*/true);
+          ShiftedC1Int, Ty.getSizeInBits(), Subtarget,
+          /*CompressionCost*/ true);
 
       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
       // combine should be prevented.

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 1d19faa4e06e8a4..01f2bb9d730375e 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -822,8 +822,7 @@ void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
   if (!STI.is64Bit() && !isInt<32>(Val))
     report_fatal_error("Should only materialize 32-bit constants for RV32");
 
-  RISCVMatInt::InstSeq Seq =
-      RISCVMatInt::generateInstSeq(Val, STI.getFeatureBits());
+  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val, STI);
   assert(!Seq.empty());
 
   bool SrcRenamable = false;

diff --git a/llvm/lib/Target/RISCV/RISCVPostRAExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVPostRAExpandPseudoInsts.cpp
index bc9b66d6ca6b114..57b473645ae7a47 100644
--- a/llvm/lib/Target/RISCV/RISCVPostRAExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVPostRAExpandPseudoInsts.cpp
@@ -88,8 +88,8 @@ bool RISCVPostRAExpandPseudo::expandMovImm(MachineBasicBlock &MBB,
 
   int64_t Val = MBBI->getOperand(1).getImm();
 
-  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(
-      Val, MBB.getParent()->getSubtarget().getFeatureBits());
+  RISCVMatInt::InstSeq Seq =
+      RISCVMatInt::generateInstSeq(Val, MBB.getParent()->getSubtarget());
   assert(!Seq.empty());
 
   Register DstReg = MBBI->getOperand(0).getReg();

diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 25bbb189cadd835..51a8b2384c0e17a 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -45,8 +45,7 @@ InstructionCost RISCVTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
 
   // Otherwise, we check how many instructions it will take to materialise.
   const DataLayout &DL = getDataLayout();
-  return RISCVMatInt::getIntMatCost(Imm, DL.getTypeSizeInBits(Ty),
-                                    getST()->getFeatureBits());
+  return RISCVMatInt::getIntMatCost(Imm, DL.getTypeSizeInBits(Ty), *getST());
 }
 
 // Look for patterns of shift followed by AND that can be turned into a pair of


        

