[llvm] 9d1bcb7 - [RISCV] Use GPR register class for RV64 ZDInx. Remove GPRF64 register class.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 22 09:41:36 PDT 2023


Author: Craig Topper
Date: 2023-06-22T09:38:46-07:00
New Revision: 9d1bcb70ec8830ad85c0756a03022d3811537284

URL: https://github.com/llvm/llvm-project/commit/9d1bcb70ec8830ad85c0756a03022d3811537284
DIFF: https://github.com/llvm/llvm-project/commit/9d1bcb70ec8830ad85c0756a03022d3811537284.diff

LOG: [RISCV] Use GPR register class for RV64 ZDInx. Remove GPRF64 register class.

The GPRF64 has the same spill size as GPR and is only used for RV64.
There's no real reason to have it as a separate class other than
for type inference for isel patterns in tablegen.

This patch adds f64 to the GPR register class when XLen=64. I use
f32 when XLen=32, even though we don't make use of it, just to avoid
the oddity.

isel patterns have been updated to fix the lack of type inference.

I might do similar for GPRF16 and GPRF32 or I might change them to
use an optimized spill size instead of always using XLen.

Reviewed By: asb

Differential Revision: https://reviews.llvm.org/D153110

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfo.td
    llvm/lib/Target/RISCV/RISCVInstrInfoA.td
    llvm/lib/Target/RISCV/RISCVInstrInfoD.td
    llvm/lib/Target/RISCV/RISCVInstrInfoF.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
    llvm/lib/Target/RISCV/RISCVInstrInfoXTHead.td
    llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZfa.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZicond.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
    llvm/lib/Target/RISCV/RISCVRegisterInfo.td

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a7c5290d02614..2ce45594764ad 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -126,7 +126,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     addRegisterClass(MVT::f32, &RISCV::GPRF32RegClass);
   if (Subtarget.hasStdExtZdinx()) {
     if (Subtarget.is64Bit())
-      addRegisterClass(MVT::f64, &RISCV::GPRF64RegClass);
+      addRegisterClass(MVT::f64, &RISCV::GPRRegClass);
     else
       addRegisterClass(MVT::f64, &RISCV::GPRPF64RegClass);
   }
@@ -13827,7 +13827,7 @@ static MachineBasicBlock *emitFROUND(MachineInstr &MI, MachineBasicBlock *MBB,
     I2FOpc = RISCV::FCVT_D_L_INX;
     FSGNJOpc = RISCV::FSGNJ_D_INX;
     FSGNJXOpc = RISCV::FSGNJX_D_INX;
-    RC = &RISCV::GPRF64RegClass;
+    RC = &RISCV::GPRRegClass;
     break;
   }
 
@@ -16016,7 +16016,6 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
   // Subtarget into account.
   if (Res.second == &RISCV::GPRF16RegClass ||
       Res.second == &RISCV::GPRF32RegClass ||
-      Res.second == &RISCV::GPRF64RegClass ||
       Res.second == &RISCV::GPRPF64RegClass)
     return std::make_pair(Res.first, &RISCV::GPRRegClass);
 

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index eb675d9581571..169ab6704f09c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1203,9 +1203,9 @@ def : InstAlias<".insn_s $opcode, $funct3, $rs2, ${imm12}(${rs1})",
 /// Generic pattern classes
 
 class PatGpr<SDPatternOperator OpNode, RVInst Inst, ValueType vt = XLenVT>
-    : Pat<(vt (OpNode GPR:$rs1)), (Inst GPR:$rs1)>;
+    : Pat<(vt (OpNode (vt GPR:$rs1))), (Inst GPR:$rs1)>;
 class PatGprGpr<SDPatternOperator OpNode, RVInst Inst, ValueType vt = XLenVT>
-    : Pat<(vt (OpNode GPR:$rs1, GPR:$rs2)), (Inst GPR:$rs1, GPR:$rs2)>;
+    : Pat<(vt (OpNode (vt GPR:$rs1), (vt GPR:$rs2))), (Inst GPR:$rs1, GPR:$rs2)>;
 
 class PatGprImm<SDPatternOperator OpNode, RVInst Inst, ImmLeaf ImmType>
     : Pat<(XLenVT (OpNode (XLenVT GPR:$rs1), ImmType:$imm)),
@@ -1322,7 +1322,7 @@ def PseudoAddTPRel : Pseudo<(outs GPR:$rd),
 
 /// FrameIndex calculations
 
-def : Pat<(FrameAddrRegImm GPR:$rs1, simm12:$imm12),
+def : Pat<(FrameAddrRegImm (iPTR GPR:$rs1), simm12:$imm12),
           (ADDI GPR:$rs1, simm12:$imm12)>;
 
 /// HI and ADD_LO address nodes.
@@ -1365,9 +1365,9 @@ def riscv_seteq : ComplexPattern<XLenVT, 1, "selectSETEQ", [setcc]>;
 
 // Define pattern expansions for setcc operations that aren't directly
 // handled by a RISC-V instruction.
-def : Pat<(riscv_seteq GPR:$rs1), (SLTIU GPR:$rs1, 1)>;
-def : Pat<(riscv_setne GPR:$rs1), (SLTU X0, GPR:$rs1)>;
-def : Pat<(setne GPR:$rs1, -1), (SLTIU GPR:$rs1, -1)>;
+def : Pat<(riscv_seteq (XLenVT GPR:$rs1)), (SLTIU GPR:$rs1, 1)>;
+def : Pat<(riscv_setne (XLenVT GPR:$rs1)), (SLTU (XLenVT X0), GPR:$rs1)>;
+def : Pat<(XLenVT (setne (XLenVT GPR:$rs1), -1)), (SLTIU GPR:$rs1, -1)>;
 
 def IntCCtoRISCVCC : SDNodeXForm<riscv_selectcc, [{
   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
@@ -1390,8 +1390,9 @@ def PseudoCCMOVGPR : Pseudo<(outs GPR:$dst),
                             (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc,
                              GPR:$falsev, GPR:$truev),
                             [(set GPR:$dst,
-                              (riscv_selectcc_frag:$cc GPR:$lhs, GPR:$rhs,
-                                                       cond, GPR:$truev,
+                              (riscv_selectcc_frag:$cc (XLenVT GPR:$lhs),
+                                                       GPR:$rhs, cond,
+                                                       (XLenVT GPR:$truev),
                                                        GPR:$falsev))]>,
                      Sched<[WriteSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB]>;
 }
@@ -1434,29 +1435,29 @@ def PseudoCCSUBW : Pseudo<(outs GPR:$dst),
                    Sched<[WriteSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB]>;
 }
 
-multiclass SelectCC_GPR_rrirr<DAGOperand valty> {
+multiclass SelectCC_GPR_rrirr<DAGOperand valty, ValueType vt> {
   let usesCustomInserter = 1 in
   def _Using_CC_GPR : Pseudo<(outs valty:$dst),
                              (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc,
                               valty:$truev, valty:$falsev),
                              [(set valty:$dst,
-                               (riscv_selectcc_frag:$cc GPR:$lhs, GPR:$rhs, cond,
-                                                        valty:$truev, valty:$falsev))]>;
+                               (riscv_selectcc_frag:$cc (XLenVT GPR:$lhs), GPR:$rhs, cond,
+                                                        (vt valty:$truev), valty:$falsev))]>;
   // Explicitly select 0 in the condition to X0. The register coalescer doesn't
   // always do it.
-  def : Pat<(riscv_selectcc_frag:$cc GPR:$lhs, 0, cond, valty:$truev,
+  def : Pat<(riscv_selectcc_frag:$cc (XLenVT GPR:$lhs), 0, cond, (vt valty:$truev),
                                      valty:$falsev),
-            (!cast<Instruction>(NAME#"_Using_CC_GPR") GPR:$lhs, X0,
+            (!cast<Instruction>(NAME#"_Using_CC_GPR") GPR:$lhs, (XLenVT X0),
              (IntCCtoRISCVCC $cc), valty:$truev, valty:$falsev)>;
 }
 
 let Predicates = [NoShortForwardBranchOpt] in
-defm Select_GPR : SelectCC_GPR_rrirr<GPR>;
+defm Select_GPR : SelectCC_GPR_rrirr<GPR, XLenVT>;
 
 class SelectCompressOpt<CondCode Cond>
-    : Pat<(riscv_selectcc_frag:$select GPR:$lhs, simm12_no6:$Constant, Cond,
-                                       GPR:$truev, GPR:$falsev),
-    (Select_GPR_Using_CC_GPR (ADDI GPR:$lhs, (NegImm simm12:$Constant)), X0,
+    : Pat<(riscv_selectcc_frag:$select (XLenVT GPR:$lhs), simm12_no6:$Constant, Cond,
+                                       (XLenVT GPR:$truev), GPR:$falsev),
+    (Select_GPR_Using_CC_GPR (ADDI GPR:$lhs, (NegImm simm12:$Constant)), (XLenVT X0),
                           (IntCCtoRISCVCC $select), GPR:$truev, GPR:$falsev)>;
 
 def OptForMinSize : Predicate<"MF ? MF->getFunction().hasMinSize() : false">;
@@ -1470,16 +1471,16 @@ let Predicates = [HasStdExtC, OptForMinSize] in {
 
 // Match `riscv_brcc` and lower to the appropriate RISC-V branch instruction.
 multiclass BccPat<CondCode Cond, RVInstB Inst> {
-  def : Pat<(riscv_brcc GPR:$rs1, GPR:$rs2, Cond, bb:$imm12),
+  def : Pat<(riscv_brcc (XLenVT GPR:$rs1), GPR:$rs2, Cond, bb:$imm12),
             (Inst GPR:$rs1, GPR:$rs2, simm13_lsb0:$imm12)>;
   // Explicitly select 0 to X0. The register coalescer doesn't always do it.
-  def : Pat<(riscv_brcc GPR:$rs1, 0, Cond, bb:$imm12),
-            (Inst GPR:$rs1, X0, simm13_lsb0:$imm12)>;
+  def : Pat<(riscv_brcc (XLenVT GPR:$rs1), 0, Cond, bb:$imm12),
+            (Inst GPR:$rs1, (XLenVT X0), simm13_lsb0:$imm12)>;
 }
 
 class BrccCompressOpt<CondCode Cond, RVInstB Inst>
     : Pat<(riscv_brcc GPR:$lhs, simm12_no6:$Constant, Cond, bb:$place),
-          (Inst (ADDI GPR:$lhs, (NegImm simm12:$Constant)), X0, bb:$place)>;
+          (Inst (ADDI GPR:$lhs, (NegImm simm12:$Constant)), (XLenVT X0), bb:$place)>;
 
 defm : BccPat<SETEQ, BEQ>;
 defm : BccPat<SETNE, BNE>;
@@ -1552,8 +1553,8 @@ def PseudoCALL : Pseudo<(outs), (ins call_symbol:$func), []>,
 def : Pat<(riscv_call tglobaladdr:$func), (PseudoCALL tglobaladdr:$func)>;
 def : Pat<(riscv_call texternalsym:$func), (PseudoCALL texternalsym:$func)>;
 
-def : Pat<(riscv_sret_glue), (SRET X0, X0)>;
-def : Pat<(riscv_mret_glue), (MRET X0, X0)>;
+def : Pat<(riscv_sret_glue), (SRET (XLenVT X0), (XLenVT X0))>;
+def : Pat<(riscv_mret_glue), (MRET (XLenVT X0), (XLenVT X0))>;
 
 let isCall = 1, Defs = [X1] in
 def PseudoCALLIndirect : Pseudo<(outs), (ins GPRJALR:$rs1),
@@ -1606,7 +1607,7 @@ let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 0,
 def PseudoLGA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                        "lga", "$dst, $src">;
 
-def : Pat<(riscv_lga tglobaladdr:$in), (PseudoLGA tglobaladdr:$in)>;
+def : Pat<(iPTR (riscv_lga tglobaladdr:$in)), (PseudoLGA tglobaladdr:$in)>;
 
 let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 0,
     isAsmParserOnly = 1 in
@@ -1618,7 +1619,7 @@ let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 0,
 def PseudoLA_TLS_IE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                              "la.tls.ie", "$dst, $src">;
 
-def : Pat<(riscv_la_tls_ie tglobaltlsaddr:$in),
+def : Pat<(iPTR (riscv_la_tls_ie tglobaltlsaddr:$in)),
           (PseudoLA_TLS_IE  tglobaltlsaddr:$in)>;
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8, isCodeGenOnly = 0,
@@ -1650,7 +1651,7 @@ def PseudoZEXT_W : Pseudo<(outs GPR:$rd), (ins GPR:$rs), [], "zext.w", "$rd, $rs
 /// Loads
 
 class LdPat<PatFrag LoadOp, RVInst Inst, ValueType vt = XLenVT>
-    : Pat<(vt (LoadOp (AddrRegImm GPR:$rs1, simm12:$imm12))),
+    : Pat<(vt (LoadOp (AddrRegImm (XLenVT GPR:$rs1), simm12:$imm12))),
           (Inst GPR:$rs1, simm12:$imm12)>;
 
 def : LdPat<sextloadi8, LB>;
@@ -1665,7 +1666,8 @@ def : LdPat<zextloadi16, LHU>;
 
 class StPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy,
             ValueType vt>
-    : Pat<(StoreOp (vt StTy:$rs2), (AddrRegImm GPR:$rs1, simm12:$imm12)),
+    : Pat<(StoreOp (vt StTy:$rs2), (AddrRegImm (XLenVT GPR:$rs1),
+                   simm12:$imm12)),
           (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>;
 
 def : StPat<truncstorei8, SB, GPR, XLenVT>;
@@ -1700,7 +1702,7 @@ def : Pat<(atomic_fence (XLenVT 7), (timm)), (FENCE 0b11, 0b11)>;
 
 class ReadSysReg<SysReg SR, list<Register> Regs>
   : Pseudo<(outs GPR:$rd), (ins),
-           [(set GPR:$rd, (riscv_read_csr (XLenVT SR.Encoding)))]>,
+           [(set GPR:$rd, (XLenVT (riscv_read_csr (XLenVT SR.Encoding))))]>,
     PseudoInstExpansion<(CSRRS GPR:$rd, SR.Encoding, X0)> {
   let hasSideEffects = 0;
   let Uses = Regs;
@@ -1708,7 +1710,7 @@ class ReadSysReg<SysReg SR, list<Register> Regs>
 
 class WriteSysReg<SysReg SR, list<Register> Regs>
   : Pseudo<(outs), (ins GPR:$val),
-           [(riscv_write_csr (XLenVT SR.Encoding), GPR:$val)]>,
+           [(riscv_write_csr (XLenVT SR.Encoding), (XLenVT GPR:$val))]>,
     PseudoInstExpansion<(CSRRW X0, SR.Encoding, GPR:$val)> {
   let hasSideEffects = 0;
   let Defs = Regs;
@@ -1724,7 +1726,7 @@ class WriteSysRegImm<SysReg SR, list<Register> Regs>
 
 class SwapSysReg<SysReg SR, list<Register> Regs>
   : Pseudo<(outs GPR:$rd), (ins GPR:$val),
-           [(set GPR:$rd, (riscv_swap_csr (XLenVT SR.Encoding), GPR:$val))]>,
+           [(set GPR:$rd, (riscv_swap_csr (XLenVT SR.Encoding), (XLenVT GPR:$val)))]>,
     PseudoInstExpansion<(CSRRW GPR:$rd, SR.Encoding, GPR:$val)> {
   let hasSideEffects = 0;
   let Uses = Regs;
@@ -1733,7 +1735,7 @@ class SwapSysReg<SysReg SR, list<Register> Regs>
 
 class SwapSysRegImm<SysReg SR, list<Register> Regs>
   : Pseudo<(outs GPR:$rd), (ins uimm5:$val),
-           [(set GPR:$rd, (riscv_swap_csr (XLenVT SR.Encoding), uimm5:$val))]>,
+           [(set GPR:$rd, (XLenVT (riscv_swap_csr (XLenVT SR.Encoding), uimm5:$val)))]>,
     PseudoInstExpansion<(CSRRWI GPR:$rd, SR.Encoding, uimm5:$val)> {
   let hasSideEffects = 0;
   let Uses = Regs;
@@ -1860,7 +1862,7 @@ def : StPat<store, SD, GPR, i64>;
 /// readcyclecounter
 // On RV64, we can directly read the 64-bit "cycle" CSR.
 let Predicates = [IsRV64] in
-def : Pat<(i64 (readcyclecounter)), (CSRRS CYCLE.Encoding, X0)>;
+def : Pat<(i64 (readcyclecounter)), (CSRRS CYCLE.Encoding, (XLenVT X0))>;
 // On RV32, ReadCycleWide will be expanded to the suggested loop reading both
 // halves of the 64-bit "cycle" CSR.
 let Predicates = [IsRV32], usesCustomInserter = 1, hasNoSchedulingInfo = 1 in
@@ -1886,7 +1888,7 @@ def HWASAN_CHECK_MEMACCESS_SHORTGRANULES
                                                       (i32 timm:$accessinfo))]>;
 
 /// Simple optimization
-def : Pat<(add GPR:$rs1, (AddiPair:$rs2)),
+def : Pat<(XLenVT (add GPR:$rs1, (AddiPair:$rs2))),
           (ADDI (ADDI GPR:$rs1, (AddiPairImmLarge AddiPair:$rs2)),
                 (AddiPairImmSmall GPR:$rs2))>;
 

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
index f2e016602d76e..b388168386408 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
@@ -45,7 +45,8 @@ multiclass AMO_rr_aq_rl<bits<5> funct5, bits<3> funct3, string opcodestr> {
 
 class AtomicStPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy,
                   ValueType vt = XLenVT>
-    : Pat<(StoreOp (AddrRegImm GPR:$rs1, simm12:$imm12), (vt StTy:$rs2)),
+    : Pat<(StoreOp (AddrRegImm (XLenVT GPR:$rs1), simm12:$imm12),
+                   (vt StTy:$rs2)),
           (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>;
 
 //===----------------------------------------------------------------------===//
@@ -149,16 +150,16 @@ defm : AMOPat<"atomic_load_min_32", "AMOMIN_W">;
 defm : AMOPat<"atomic_load_umax_32", "AMOMAXU_W">;
 defm : AMOPat<"atomic_load_umin_32", "AMOMINU_W">;
 
-def : Pat<(atomic_load_sub_32_monotonic GPR:$addr, GPR:$incr),
-          (AMOADD_W GPR:$addr, (SUB X0, GPR:$incr))>;
-def : Pat<(atomic_load_sub_32_acquire GPR:$addr, GPR:$incr),
-          (AMOADD_W_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
-def : Pat<(atomic_load_sub_32_release GPR:$addr, GPR:$incr),
-          (AMOADD_W_RL GPR:$addr, (SUB X0, GPR:$incr))>;
-def : Pat<(atomic_load_sub_32_acq_rel GPR:$addr, GPR:$incr),
-          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
-def : Pat<(atomic_load_sub_32_seq_cst GPR:$addr, GPR:$incr),
-          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+def : Pat<(XLenVT (atomic_load_sub_32_monotonic GPR:$addr, GPR:$incr)),
+          (AMOADD_W GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
+def : Pat<(XLenVT (atomic_load_sub_32_acquire GPR:$addr, GPR:$incr)),
+          (AMOADD_W_AQ GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
+def : Pat<(XLenVT (atomic_load_sub_32_release GPR:$addr, GPR:$incr)),
+          (AMOADD_W_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
+def : Pat<(XLenVT (atomic_load_sub_32_acq_rel GPR:$addr, GPR:$incr)),
+          (AMOADD_W_AQ_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
+def : Pat<(XLenVT (atomic_load_sub_32_seq_cst GPR:$addr, GPR:$incr)),
+          (AMOADD_W_AQ_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
 
 /// Pseudo AMOs
 
@@ -174,15 +175,15 @@ let Size = 20 in
 def PseudoAtomicLoadNand32 : PseudoAMO;
 // Ordering constants must be kept in sync with the AtomicOrdering enum in
 // AtomicOrdering.h.
-def : Pat<(atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr),
+def : Pat<(XLenVT (atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr)),
           (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
-def : Pat<(atomic_load_nand_32_acquire GPR:$addr, GPR:$incr),
+def : Pat<(XLenVT (atomic_load_nand_32_acquire GPR:$addr, GPR:$incr)),
           (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
-def : Pat<(atomic_load_nand_32_release GPR:$addr, GPR:$incr),
+def : Pat<(XLenVT (atomic_load_nand_32_release GPR:$addr, GPR:$incr)),
           (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
-def : Pat<(atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr),
+def : Pat<(XLenVT (atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr)),
           (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
-def : Pat<(atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr),
+def : Pat<(XLenVT (atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr)),
           (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;
 
 class PseudoMaskedAMO
@@ -322,15 +323,15 @@ defm : AMOPat<"atomic_load_umin_64", "AMOMINU_D", i64>;
 /// 64-bit AMOs
 
 def : Pat<(i64 (atomic_load_sub_64_monotonic GPR:$addr, GPR:$incr)),
-          (AMOADD_D GPR:$addr, (SUB X0, GPR:$incr))>;
+          (AMOADD_D GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
 def : Pat<(i64 (atomic_load_sub_64_acquire GPR:$addr, GPR:$incr)),
-          (AMOADD_D_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
+          (AMOADD_D_AQ GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
 def : Pat<(i64 (atomic_load_sub_64_release GPR:$addr, GPR:$incr)),
-          (AMOADD_D_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+          (AMOADD_D_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
 def : Pat<(i64 (atomic_load_sub_64_acq_rel GPR:$addr, GPR:$incr)),
-          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+          (AMOADD_D_AQ_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
 def : Pat<(i64 (atomic_load_sub_64_seq_cst GPR:$addr, GPR:$incr)),
-          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+          (AMOADD_D_AQ_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
 
 /// 64-bit pseudo AMOs
 

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
index f2099a4652a59..93a1571596fef 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -45,7 +45,7 @@ def GPRF64AsFPR : AsmOperandClass {
   let RenderMethod = "addRegOperands";
 }
 
-def FPR64INX : RegisterOperand<GPRF64> {
+def FPR64INX : RegisterOperand<GPR> {
   let ParserMatchClass = GPRF64AsFPR;
   let DecoderMethod = "DecodeGPRRegisterClass";
 }
@@ -394,76 +394,76 @@ foreach Ext = DExts in {
 
 // Match non-signaling FEQ_D
 foreach Ext = DExts in {
-  defm : PatSetCC_m<any_fsetcc,    SETEQ,  FEQ_D,            Ext>;
-  defm : PatSetCC_m<any_fsetcc,    SETOEQ, FEQ_D,            Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETLT,  PseudoQuietFLT_D, Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETOLT, PseudoQuietFLT_D, Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETLE,  PseudoQuietFLE_D, Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETOLE, PseudoQuietFLE_D, Ext>;
+  defm : PatSetCC_m<any_fsetcc,    SETEQ,  FEQ_D,            Ext, f64>;
+  defm : PatSetCC_m<any_fsetcc,    SETOEQ, FEQ_D,            Ext, f64>;
+  defm : PatSetCC_m<strict_fsetcc, SETLT,  PseudoQuietFLT_D, Ext, f64>;
+  defm : PatSetCC_m<strict_fsetcc, SETOLT, PseudoQuietFLT_D, Ext, f64>;
+  defm : PatSetCC_m<strict_fsetcc, SETLE,  PseudoQuietFLE_D, Ext, f64>;
+  defm : PatSetCC_m<strict_fsetcc, SETOLE, PseudoQuietFLE_D, Ext, f64>;
 }
 
 let Predicates = [HasStdExtD] in {
 // Match signaling FEQ_D
-def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs2, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR64:$rs1, FPR64:$rs2, SETEQ)),
           (AND (FLE_D $rs1, $rs2),
                (FLE_D $rs2, $rs1))>;
-def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs2, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR64:$rs1, FPR64:$rs2, SETOEQ)),
           (AND (FLE_D $rs1, $rs2),
                (FLE_D $rs2, $rs1))>;
 // If both operands are the same, use a single FLE.
-def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs1, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR64:$rs1, FPR64:$rs1, SETEQ)),
           (FLE_D $rs1, $rs1)>;
-def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs1, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR64:$rs1, FPR64:$rs1, SETOEQ)),
           (FLE_D $rs1, $rs1)>;
 
-def : PatSetCC<FPR64, any_fsetccs, SETLT, FLT_D>;
-def : PatSetCC<FPR64, any_fsetccs, SETOLT, FLT_D>;
-def : PatSetCC<FPR64, any_fsetccs, SETLE, FLE_D>;
-def : PatSetCC<FPR64, any_fsetccs, SETOLE, FLE_D>;
+def : PatSetCC<FPR64, any_fsetccs, SETLT, FLT_D, f64>;
+def : PatSetCC<FPR64, any_fsetccs, SETOLT, FLT_D, f64>;
+def : PatSetCC<FPR64, any_fsetccs, SETLE, FLE_D, f64>;
+def : PatSetCC<FPR64, any_fsetccs, SETOLE, FLE_D, f64>;
 } // Predicates = [HasStdExtD]
 
 let Predicates = [HasStdExtZdinx, IsRV64] in {
 // Match signaling FEQ_D
-def : Pat<(strict_fsetccs FPR64INX:$rs1, FPR64INX:$rs2, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs (f64 FPR64INX:$rs1), FPR64INX:$rs2, SETEQ)),
           (AND (FLE_D_INX $rs1, $rs2),
                (FLE_D_INX $rs2, $rs1))>;
-def : Pat<(strict_fsetccs FPR64INX:$rs1, FPR64INX:$rs2, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs (f64 FPR64INX:$rs1), FPR64INX:$rs2, SETOEQ)),
           (AND (FLE_D_INX $rs1, $rs2),
                (FLE_D_INX $rs2, $rs1))>;
 // If both operands are the same, use a single FLE.
-def : Pat<(strict_fsetccs FPR64INX:$rs1, FPR64INX:$rs1, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs (f64 FPR64INX:$rs1), FPR64INX:$rs1, SETEQ)),
           (FLE_D_INX $rs1, $rs1)>;
-def : Pat<(strict_fsetccs FPR64INX:$rs1, FPR64INX:$rs1, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs (f64 FPR64INX:$rs1), FPR64INX:$rs1, SETOEQ)),
           (FLE_D_INX $rs1, $rs1)>;
 
-def : PatSetCC<FPR64INX, any_fsetccs, SETLT,  FLT_D_INX>;
-def : PatSetCC<FPR64INX, any_fsetccs, SETOLT, FLT_D_INX>;
-def : PatSetCC<FPR64INX, any_fsetccs, SETLE,  FLE_D_INX>;
-def : PatSetCC<FPR64INX, any_fsetccs, SETOLE, FLE_D_INX>;
+def : PatSetCC<FPR64INX, any_fsetccs, SETLT,  FLT_D_INX, f64>;
+def : PatSetCC<FPR64INX, any_fsetccs, SETOLT, FLT_D_INX, f64>;
+def : PatSetCC<FPR64INX, any_fsetccs, SETLE,  FLE_D_INX, f64>;
+def : PatSetCC<FPR64INX, any_fsetccs, SETOLE, FLE_D_INX, f64>;
 } // Predicates = [HasStdExtZdinx, IsRV64]
 
 let Predicates = [HasStdExtZdinx, IsRV32] in {
 // Match signaling FEQ_D
-def : Pat<(strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs2, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs2, SETEQ)),
           (AND (FLE_D_IN32X $rs1, $rs2),
                (FLE_D_IN32X $rs2, $rs1))>;
-def : Pat<(strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs2, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs2, SETOEQ)),
           (AND (FLE_D_IN32X $rs1, $rs2),
                (FLE_D_IN32X $rs2, $rs1))>;
 // If both operands are the same, use a single FLE.
-def : Pat<(strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs1, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs1, SETEQ)),
           (FLE_D_IN32X $rs1, $rs1)>;
-def : Pat<(strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs1, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs1, SETOEQ)),
           (FLE_D_IN32X $rs1, $rs1)>;
 
-def : PatSetCC<FPR64IN32X, any_fsetccs, SETLT,  FLT_D_IN32X>;
-def : PatSetCC<FPR64IN32X, any_fsetccs, SETOLT, FLT_D_IN32X>;
-def : PatSetCC<FPR64IN32X, any_fsetccs, SETLE,  FLE_D_IN32X>;
-def : PatSetCC<FPR64IN32X, any_fsetccs, SETOLE, FLE_D_IN32X>;
+def : PatSetCC<FPR64IN32X, any_fsetccs, SETLT,  FLT_D_IN32X, f64>;
+def : PatSetCC<FPR64IN32X, any_fsetccs, SETOLT, FLT_D_IN32X, f64>;
+def : PatSetCC<FPR64IN32X, any_fsetccs, SETLE,  FLE_D_IN32X, f64>;
+def : PatSetCC<FPR64IN32X, any_fsetccs, SETOLE, FLE_D_IN32X, f64>;
 } // Predicates = [HasStdExtZdinx, IsRV32]
 
 let Predicates = [HasStdExtD] in {
-defm Select_FPR64 : SelectCC_GPR_rrirr<FPR64>;
+defm Select_FPR64 : SelectCC_GPR_rrirr<FPR64, f64>;
 
 def PseudoFROUND_D : PseudoFROUND<FPR64>;
 
@@ -492,34 +492,32 @@ def SplitF64Pseudo
 } // Predicates = [HasStdExtD]
 
 let Predicates = [HasStdExtZdinx, IsRV64] in {
-defm Select_FPR64INX : SelectCC_GPR_rrirr<FPR64INX>;
+defm Select_FPR64INX : SelectCC_GPR_rrirr<FPR64INX, f64>;
 
 def PseudoFROUND_D_INX : PseudoFROUND<FPR64INX>;
 
 /// Loads
-def : Pat<(f64 (load (AddrRegImm GPR:$rs1, simm12:$imm12))),
-          (COPY_TO_REGCLASS (LD GPR:$rs1, simm12:$imm12), GPRF64)>;
+def : LdPat<load, LD, f64>;
 
 /// Stores
-def : Pat<(store (f64 FPR64INX:$rs2), (AddrRegImm GPR:$rs1, simm12:$imm12)),
-          (SD (COPY_TO_REGCLASS FPR64INX:$rs2, GPR), GPR:$rs1, simm12:$imm12)>;
+def : StPat<store, SD, GPR, f64>;
 } // Predicates = [HasStdExtZdinx, IsRV64]
 
 let Predicates = [HasStdExtZdinx, IsRV32] in {
-defm Select_FPR64IN32X : SelectCC_GPR_rrirr<FPR64IN32X>;
+defm Select_FPR64IN32X : SelectCC_GPR_rrirr<FPR64IN32X, f64>;
 
 def PseudoFROUND_D_IN32X : PseudoFROUND<FPR64IN32X>;
 
 /// Loads
 let isCall = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 1 in
 def PseudoRV32ZdinxLD : Pseudo<(outs GPRPF64:$dst), (ins GPR:$rs1, simm12:$imm12), []>;
-def : Pat<(f64 (load (AddrRegImmINX GPR:$rs1, simm12:$imm12))),
+def : Pat<(f64 (load (AddrRegImmINX (XLenVT GPR:$rs1), simm12:$imm12))),
           (PseudoRV32ZdinxLD GPR:$rs1, simm12:$imm12)>;
 
 /// Stores
 let isCall = 0, mayLoad = 0, mayStore = 1, Size = 8, isCodeGenOnly = 1 in
 def PseudoRV32ZdinxSD : Pseudo<(outs), (ins GPRPF64:$rs2, GPRNoX0:$rs1, simm12:$imm12), []>;
-def : Pat<(store (f64 GPRPF64:$rs2), (AddrRegImmINX GPR:$rs1, simm12:$imm12)),
+def : Pat<(store (f64 GPRPF64:$rs2), (AddrRegImmINX (XLenVT GPR:$rs1), simm12:$imm12)),
           (PseudoRV32ZdinxSD GPRPF64:$rs2, GPR:$rs1, simm12:$imm12)>;
 
 /// Pseudo-instructions needed for the soft-float ABI with RV32D
@@ -619,8 +617,8 @@ def : Pat<(any_uint_to_fp (i64 GPR:$rs1)), (FCVT_D_LU GPR:$rs1, FRM_DYN)>;
 let Predicates = [HasStdExtZdinx, IsRV64] in {
 
 // Moves (no conversion)
-def : Pat<(f64 (bitconvert (i64 GPR:$rs1))), (COPY_TO_REGCLASS GPR:$rs1, GPRF64)>;
-def : Pat<(i64 (bitconvert FPR64INX:$rs1)), (COPY_TO_REGCLASS FPR64INX:$rs1, GPR)>;
+def : Pat<(f64 (bitconvert (i64 GPR:$rs1))), (COPY_TO_REGCLASS GPR:$rs1, GPR)>;
+def : Pat<(i64 (bitconvert (f64 GPR:$rs1))), (COPY_TO_REGCLASS GPR:$rs1, GPR)>;
 
 // Use target specific isd nodes to help us remember the result is sign
 // extended. Matching sext_inreg+fptoui/fptosi may cause the conversion to be

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
index bce4fe2e5ae98..e7f03af68094c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -436,13 +436,13 @@ def fpimm0    : PatLeaf<(fpimm), [{ return N->isExactlyValue(+0.0); }]>;
 
 /// Generic pattern classes
 class PatSetCC<DAGOperand Ty, SDPatternOperator OpNode, CondCode Cond,
-               RVInst Inst>
-    : Pat<(OpNode Ty:$rs1, Ty:$rs2, Cond), (Inst $rs1, $rs2)>;
+               RVInst Inst, ValueType vt>
+    : Pat<(XLenVT (OpNode (vt Ty:$rs1), Ty:$rs2, Cond)), (Inst $rs1, $rs2)>;
 multiclass PatSetCC_m<SDPatternOperator OpNode, CondCode Cond,
-                      RVInst Inst, ExtInfo Ext> {
+                      RVInst Inst, ExtInfo Ext, ValueType vt> {
   let Predicates = Ext.Predicates in
   def Ext.Suffix : PatSetCC<Ext.PrimaryTy, OpNode, Cond,
-                            !cast<RVInst>(Inst#Ext.Suffix)>;
+                            !cast<RVInst>(Inst#Ext.Suffix), vt>;
 }
 
 class PatFprFpr<SDPatternOperator OpNode, RVInstR Inst,
@@ -562,53 +562,53 @@ foreach Ext = FExts in {
 
 // Match non-signaling FEQ_S
 foreach Ext = FExts in {
-  defm : PatSetCC_m<any_fsetcc,    SETEQ,  FEQ_S,            Ext>;
-  defm : PatSetCC_m<any_fsetcc,    SETOEQ, FEQ_S,            Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETLT,  PseudoQuietFLT_S, Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETOLT, PseudoQuietFLT_S, Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETLE,  PseudoQuietFLE_S, Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETOLE, PseudoQuietFLE_S, Ext>;
+  defm : PatSetCC_m<any_fsetcc,    SETEQ,  FEQ_S,            Ext, f32>;
+  defm : PatSetCC_m<any_fsetcc,    SETOEQ, FEQ_S,            Ext, f32>;
+  defm : PatSetCC_m<strict_fsetcc, SETLT,  PseudoQuietFLT_S, Ext, f32>;
+  defm : PatSetCC_m<strict_fsetcc, SETOLT, PseudoQuietFLT_S, Ext, f32>;
+  defm : PatSetCC_m<strict_fsetcc, SETLE,  PseudoQuietFLE_S, Ext, f32>;
+  defm : PatSetCC_m<strict_fsetcc, SETOLE, PseudoQuietFLE_S, Ext, f32>;
 }
 
 let Predicates = [HasStdExtF] in {
 // Match signaling FEQ_S
-def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs2, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR32:$rs1, FPR32:$rs2, SETEQ)),
           (AND (FLE_S $rs1, $rs2),
                (FLE_S $rs2, $rs1))>;
-def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs2, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR32:$rs1, FPR32:$rs2, SETOEQ)),
           (AND (FLE_S $rs1, $rs2),
                (FLE_S $rs2, $rs1))>;
 // If both operands are the same, use a single FLE.
-def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs1, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR32:$rs1, FPR32:$rs1, SETEQ)),
           (FLE_S $rs1, $rs1)>;
-def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs1, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR32:$rs1, FPR32:$rs1, SETOEQ)),
           (FLE_S $rs1, $rs1)>;
 } // Predicates = [HasStdExtF]
 
 let Predicates = [HasStdExtZfinx] in {
 // Match signaling FEQ_S
-def : Pat<(strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs2, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs2, SETEQ)),
           (AND (FLE_S_INX $rs1, $rs2),
                (FLE_S_INX $rs2, $rs1))>;
-def : Pat<(strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs2, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs2, SETOEQ)),
           (AND (FLE_S_INX $rs1, $rs2),
                (FLE_S_INX $rs2, $rs1))>;
 // If both operands are the same, use a single FLE.
-def : Pat<(strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs1, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs1, SETEQ)),
           (FLE_S_INX $rs1, $rs1)>;
-def : Pat<(strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs1, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs1, SETOEQ)),
           (FLE_S_INX $rs1, $rs1)>;
 } // Predicates = [HasStdExtZfinx]
 
 foreach Ext = FExts in {
-  defm : PatSetCC_m<any_fsetccs, SETLT,  FLT_S, Ext>;
-  defm : PatSetCC_m<any_fsetccs, SETOLT, FLT_S, Ext>;
-  defm : PatSetCC_m<any_fsetccs, SETLE,  FLE_S, Ext>;
-  defm : PatSetCC_m<any_fsetccs, SETOLE, FLE_S, Ext>;
+  defm : PatSetCC_m<any_fsetccs, SETLT,  FLT_S, Ext, f32>;
+  defm : PatSetCC_m<any_fsetccs, SETOLT, FLT_S, Ext, f32>;
+  defm : PatSetCC_m<any_fsetccs, SETLE,  FLE_S, Ext, f32>;
+  defm : PatSetCC_m<any_fsetccs, SETOLE, FLE_S, Ext, f32>;
 }
 
 let Predicates = [HasStdExtF] in {
-defm Select_FPR32 : SelectCC_GPR_rrirr<FPR32>;
+defm Select_FPR32 : SelectCC_GPR_rrirr<FPR32, f32>;
 
 def PseudoFROUND_S : PseudoFROUND<FPR32>;
 
@@ -623,16 +623,16 @@ def : StPat<store, FSW, FPR32, f32>;
 } // Predicates = [HasStdExtF]
 
 let Predicates = [HasStdExtZfinx] in {
-defm Select_FPR32INX : SelectCC_GPR_rrirr<FPR32INX>;
+defm Select_FPR32INX : SelectCC_GPR_rrirr<FPR32INX, f32>;
 
 def PseudoFROUND_S_INX : PseudoFROUND<FPR32INX>;
 
 /// Loads
-def : Pat<(f32 (load (AddrRegImm GPR:$rs1, simm12:$imm12))),
+def : Pat<(f32 (load (AddrRegImm (XLenVT GPR:$rs1), simm12:$imm12))),
           (COPY_TO_REGCLASS (LW GPR:$rs1, simm12:$imm12), GPRF32)>;
 
 /// Stores
-def : Pat<(store (f32 FPR32INX:$rs2), (AddrRegImm GPR:$rs1, simm12:$imm12)),
+def : Pat<(store (f32 FPR32INX:$rs2), (AddrRegImm (XLenVT GPR:$rs1), simm12:$imm12)),
           (SW (COPY_TO_REGCLASS FPR32INX:$rs2, GPR), GPR:$rs1, simm12:$imm12)>;
 } // Predicates = [HasStdExtZfinx]
 

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 40b78ce8c6f7e..3f8cc0e6f07d9 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -6862,7 +6862,7 @@ defm : VPatNullaryV<"int_riscv_vid", "PseudoVID">;
 
 foreach vti = AllIntegerVectors in {
   let Predicates = GetVTypePredicates<vti>.Predicates in
-  def : Pat<(riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2)),
+  def : Pat<(XLenVT (riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2))),
             (!cast<Instruction>("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.Log2SEW)>;
   // vmv.s.x is handled with a custom node in RISCVInstrInfoVVLPatterns.td
 }
@@ -6884,7 +6884,7 @@ foreach fvti = AllFloatVectors in {
     def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
                            (fvti.Scalar (fpimm0)), VLOpFrag)),
               (!cast<Instruction>("PseudoVMV_S_X_" # fvti.LMul.MX)
-               (fvti.Vector $rs1), X0, GPR:$vl, fvti.Log2SEW)>;
+               (fvti.Vector $rs1), (XLenVT X0), GPR:$vl, fvti.Log2SEW)>;
   }
 }
 

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index c03a4fa9180fe..cb5b2d3945d4f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -107,7 +107,7 @@ class VPatBinarySDNode_XI<SDPatternOperator vop,
                           bit isSEWAware = 0> :
     Pat<(result_type (vop
                      (vop_type vop_reg_class:$rs1),
-                     (vop_type (SplatPatKind xop_kind:$rs2)))),
+                     (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))))),
         (!cast<Instruction>(
                      !if(isSEWAware,
                          instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew)#"_TU",
@@ -234,7 +234,7 @@ multiclass VPatIntegerSetCCSDNode_XI<
     defvar instruction = !cast<Instruction>(instruction_name#_#kind#_#vti.LMul.MX);
     let Predicates = GetVTypePredicates<vti>.Predicates in
     def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
-                               (vti.Vector (SplatPatKind xop_kind:$rs2)), cc)),
+                               (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))), cc)),
               (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
   }
 }
@@ -250,9 +250,9 @@ multiclass VPatIntegerSetCCSDNode_XI_Swappable<string instruction_name,
     defvar instruction = !cast<Instruction>(instruction_name#_#kind#_#vti.LMul.MX);
     let Predicates = GetVTypePredicates<vti>.Predicates in {
       def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
-                                 (vti.Vector (SplatPatKind xop_kind:$rs2)), cc)),
+                                 (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))), cc)),
                 (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
-      def : Pat<(vti.Mask (setcc (vti.Vector (SplatPatKind xop_kind:$rs2)),
+      def : Pat<(vti.Mask (setcc (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))),
                                  (vti.Vector vti.RegClass:$rs1), invcc)),
                 (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
     }
@@ -408,7 +408,7 @@ multiclass VPatWidenBinarySDNode_VV_VX<SDNode op, PatFrags extop1, PatFrags exto
                 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                    vti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>;
       def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))),
-                    (wti.Vector (extop2 (vti.Vector (SplatPat GPR:$rs1))))),
+                    (wti.Vector (extop2 (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))),
                 (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX)
                    vti.RegClass:$rs2, GPR:$rs1, vti.AVL, vti.Log2SEW)>;
     }
@@ -428,7 +428,7 @@ multiclass VPatWidenBinarySDNode_WV_WX<SDNode op, PatFrags extop,
                    wti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW,
                    TAIL_AGNOSTIC)>;
       def : Pat<(op (wti.Vector wti.RegClass:$rs2),
-                    (wti.Vector (extop (vti.Vector (SplatPat GPR:$rs1))))),
+                    (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))),
                 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
                    wti.RegClass:$rs2, GPR:$rs1, vti.AVL, vti.Log2SEW)>;
     }
@@ -464,7 +464,7 @@ multiclass VPatWidenMulAddSDNode_VX<PatFrags extop1, PatFrags extop2, string ins
                                  GetVTypePredicates<wti>.Predicates) in
     def : Pat<
       (add (wti.Vector wti.RegClass:$rd),
-        (mul_oneuse (wti.Vector (extop1 (vti.Vector (SplatPat GPR:$rs1)))),
+        (mul_oneuse (wti.Vector (extop1 (vti.Vector (SplatPat (XLenVT GPR:$rs1))))),
                     (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs2))))),
       (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX)
         wti.RegClass:$rd, GPR:$rs1, vti.RegClass:$rs2,
@@ -713,7 +713,7 @@ defm : VPatBinarySDNode_VV_VX<sub, "PseudoVSUB">;
 // pattern operands
 foreach vti = AllIntegerVectors in {
   let Predicates = GetVTypePredicates<vti>.Predicates in {
-    def : Pat<(sub (vti.Vector (SplatPat GPR:$rs2)),
+    def : Pat<(sub (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
                    (vti.Vector vti.RegClass:$rs1)),
               (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
                    vti.RegClass:$rs1, GPR:$rs2, vti.AVL, vti.Log2SEW)>;

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 5aeec81a79af5..053e701d0be8a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -1205,7 +1205,7 @@ multiclass VPatBinaryExtVL_WV_WX<SDNode op, PatFrags extop, string instruction_n
         (vti.Vector
           (riscv_trunc_vector_vl
             (op (wti.Vector wti.RegClass:$rs2),
-                (wti.Vector (extop (vti.Vector (SplatPat GPR:$rs1))))),
+                (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))),
             (vti.Mask true_mask),
             VLOpFrag)),
         (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
@@ -1337,7 +1337,7 @@ multiclass VPatNarrowShiftSplatExt_WX<SDNode op, PatFrags extop, string instruct
       (vti.Vector
         (riscv_trunc_vector_vl
           (op (wti.Vector wti.RegClass:$rs2),
-              (wti.Vector (extop (vti.Vector (SplatPat GPR:$rs1)),
+              (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1))),
                                  (vti.Mask true_mask), VLOpFrag)),
           srcvalue, (wti.Mask true_mask), VLOpFrag),
         (vti.Mask true_mask), VLOpFrag)),
@@ -2309,7 +2309,7 @@ foreach vti = AllFloatVectors in {
                                              (vti.Scalar (fpimm0)),
                                              VLOpFrag)),
               (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
-                  vti.RegClass:$merge, X0, GPR:$vl, vti.Log2SEW)>;
+                  vti.RegClass:$merge, (XLenVT X0), GPR:$vl, vti.Log2SEW)>;
     def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                              (vti.Scalar (SelectFPImm (XLenVT GPR:$imm))),
                                              VLOpFrag)),

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXTHead.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXTHead.td
index d2a519868f1e1..58e64e4aec9f9 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXTHead.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXTHead.td
@@ -519,7 +519,7 @@ multiclass VPatTernaryVMAQA_VV_VX<string intrinsic, string instruction,
 // Pseudo-instructions and codegen patterns
 //===----------------------------------------------------------------------===//
 let Predicates = [HasVendorXTHeadBa] in {
-def : Pat<(add GPR:$rs1, (shl GPR:$rs2, uimm2:$uimm2)),
+def : Pat<(add (XLenVT GPR:$rs1), (shl GPR:$rs2, uimm2:$uimm2)),
           (TH_ADDSL GPR:$rs1, GPR:$rs2, uimm2:$uimm2)>;
 
 // Reuse complex patterns from StdExtZba
@@ -549,18 +549,18 @@ def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 40)), GPR:$rs2),
 def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 72)), GPR:$rs2),
           (TH_ADDSL GPR:$rs2, (TH_ADDSL GPR:$rs1, GPR:$rs1, 3), 3)>;
 
-def : Pat<(add GPR:$r, CSImm12MulBy4:$i),
-          (TH_ADDSL GPR:$r, (ADDI X0, (SimmShiftRightBy2XForm CSImm12MulBy4:$i)), 2)>;
-def : Pat<(add GPR:$r, CSImm12MulBy8:$i),
-          (TH_ADDSL GPR:$r, (ADDI X0, (SimmShiftRightBy3XForm CSImm12MulBy8:$i)), 3)>;
+def : Pat<(add (XLenVT GPR:$r), CSImm12MulBy4:$i),
+          (TH_ADDSL GPR:$r, (ADDI (XLenVT X0), (SimmShiftRightBy2XForm CSImm12MulBy4:$i)), 2)>;
+def : Pat<(add (XLenVT GPR:$r), CSImm12MulBy8:$i),
+          (TH_ADDSL GPR:$r, (ADDI (XLenVT X0), (SimmShiftRightBy3XForm CSImm12MulBy8:$i)), 3)>;
 
-def : Pat<(mul GPR:$r, C3LeftShift:$i),
+def : Pat<(mul (XLenVT GPR:$r), C3LeftShift:$i),
           (SLLI (TH_ADDSL GPR:$r, GPR:$r, 1),
                 (TrailingZeros C3LeftShift:$i))>;
-def : Pat<(mul GPR:$r, C5LeftShift:$i),
+def : Pat<(mul (XLenVT GPR:$r), C5LeftShift:$i),
           (SLLI (TH_ADDSL GPR:$r, GPR:$r, 2),
                 (TrailingZeros C5LeftShift:$i))>;
-def : Pat<(mul GPR:$r, C9LeftShift:$i),
+def : Pat<(mul (XLenVT GPR:$r), C9LeftShift:$i),
           (SLLI (TH_ADDSL GPR:$r, GPR:$r, 3),
                 (TrailingZeros C9LeftShift:$i))>;
 
@@ -596,14 +596,14 @@ let Predicates = [HasVendorXTHeadBb] in {
 def : PatGprImm<rotr, TH_SRRI, uimmlog2xlen>;
 // There's no encoding for a rotate-left-immediate in X-THead-Bb, as
 // it can be implemented with th.srri by negating the immediate.
-def : Pat<(rotl GPR:$rs1, uimmlog2xlen:$shamt),
+def : Pat<(rotl (XLenVT GPR:$rs1), uimmlog2xlen:$shamt),
           (TH_SRRI GPR:$rs1, (ImmSubFromXLen uimmlog2xlen:$shamt))>;
-def : Pat<(sext_inreg GPR:$rs1, i32), (TH_EXT GPR:$rs1, 31, 0)>;
-def : Pat<(sext_inreg GPR:$rs1, i16), (TH_EXT GPR:$rs1, 15, 0)>;
-def : Pat<(sext_inreg GPR:$rs1, i8), (TH_EXT GPR:$rs1, 7, 0)>;
-def : Pat<(sext_inreg GPR:$rs1, i1), (TH_EXT GPR:$rs1, 0, 0)>;
+def : Pat<(sext_inreg (XLenVT GPR:$rs1), i32), (TH_EXT GPR:$rs1, 31, 0)>;
+def : Pat<(sext_inreg (XLenVT GPR:$rs1), i16), (TH_EXT GPR:$rs1, 15, 0)>;
+def : Pat<(sext_inreg (XLenVT GPR:$rs1), i8), (TH_EXT GPR:$rs1, 7, 0)>;
+def : Pat<(sext_inreg (XLenVT GPR:$rs1), i1), (TH_EXT GPR:$rs1, 0, 0)>;
 def : PatGpr<ctlz, TH_FF1>;
-def : Pat<(ctlz (xor GPR:$rs1, -1)), (TH_FF0 GPR:$rs1)>;
+def : Pat<(XLenVT (ctlz (xor (XLenVT GPR:$rs1), -1))), (TH_FF0 GPR:$rs1)>;
 def : PatGpr<bswap, TH_REV>;
 } // Predicates = [HasVendorXTHeadBb]
 
@@ -620,37 +620,39 @@ def : Pat<(riscv_clzw i64:$rs1),
 } // Predicates = [HasVendorXTHeadBb, IsRV64]
 
 let Predicates = [HasVendorXTHeadBs] in {
-def : Pat<(and (srl GPR:$rs1, uimmlog2xlen:$shamt), 1),
+def : Pat<(and (srl (XLenVT GPR:$rs1), uimmlog2xlen:$shamt), 1),
           (TH_TST GPR:$rs1, uimmlog2xlen:$shamt)>;
-def : Pat<(seteq (and GPR:$rs1, SingleBitSetMask:$mask), 0),
+def : Pat<(XLenVT (seteq (and (XLenVT GPR:$rs1), SingleBitSetMask:$mask), 0)),
           (TH_TST (XORI GPR:$rs1, -1), SingleBitSetMask:$mask)>;
 } // Predicates = [HasVendorXTHeadBs]
 
 let Predicates = [HasVendorXTHeadCondMov] in {
-def : Pat<(select GPR:$cond, GPR:$a, GPR:$b),
+def : Pat<(select (XLenVT GPR:$cond), (XLenVT GPR:$a), (XLenVT GPR:$b)),
           (TH_MVEQZ GPR:$a, GPR:$b, GPR:$cond)>;
-def : Pat<(select GPR:$cond, GPR:$a, (XLenVT 0)),
-          (TH_MVEQZ GPR:$a, X0, GPR:$cond)>;
-def : Pat<(select GPR:$cond, (XLenVT 0), GPR:$b),
-          (TH_MVNEZ GPR:$b, X0, GPR:$cond)>;
+def : Pat<(select (XLenVT GPR:$cond), (XLenVT GPR:$a), (XLenVT 0)),
+          (TH_MVEQZ GPR:$a, (XLenVT X0), GPR:$cond)>;
+def : Pat<(select (XLenVT GPR:$cond), (XLenVT 0), (XLenVT GPR:$b)),
+          (TH_MVNEZ GPR:$b, (XLenVT X0), GPR:$cond)>;
 
-def : Pat<(select (riscv_seteq GPR:$cond), GPR:$a, GPR:$b),
+def : Pat<(select (riscv_seteq (XLenVT GPR:$cond)), (XLenVT GPR:$a), (XLenVT GPR:$b)),
           (TH_MVNEZ GPR:$a, GPR:$b, GPR:$cond)>;
-def : Pat<(select (riscv_setne GPR:$cond), GPR:$a, GPR:$b),
+def : Pat<(select (riscv_setne (XLenVT GPR:$cond)), (XLenVT GPR:$a), (XLenVT GPR:$b)),
           (TH_MVEQZ GPR:$a, GPR:$b, GPR:$cond)>;
-def : Pat<(select (riscv_seteq GPR:$cond), GPR:$a, (XLenVT 0)),
-          (TH_MVNEZ GPR:$a, X0, GPR:$cond)>;
-def : Pat<(select (riscv_setne GPR:$cond), GPR:$a, (XLenVT 0)),
-          (TH_MVEQZ GPR:$a, X0, GPR:$cond)>;
-def : Pat<(select (riscv_seteq GPR:$cond), (XLenVT 0), GPR:$b),
-          (TH_MVEQZ GPR:$b, X0, GPR:$cond)>;
-def : Pat<(select (riscv_setne GPR:$cond),  (XLenVT 0), GPR:$b),
-          (TH_MVNEZ GPR:$b, X0, GPR:$cond)>;
+def : Pat<(select (riscv_seteq (XLenVT GPR:$cond)), (XLenVT GPR:$a), (XLenVT 0)),
+          (TH_MVNEZ GPR:$a, (XLenVT X0), GPR:$cond)>;
+def : Pat<(select (riscv_setne (XLenVT GPR:$cond)), (XLenVT GPR:$a), (XLenVT 0)),
+          (TH_MVEQZ GPR:$a, (XLenVT X0), GPR:$cond)>;
+def : Pat<(select (riscv_seteq (XLenVT GPR:$cond)), (XLenVT 0), (XLenVT GPR:$b)),
+          (TH_MVEQZ GPR:$b, (XLenVT X0), GPR:$cond)>;
+def : Pat<(select (riscv_setne (XLenVT GPR:$cond)),  (XLenVT 0), (XLenVT GPR:$b)),
+          (TH_MVNEZ GPR:$b, (XLenVT X0), GPR:$cond)>;
 } // Predicates = [HasVendorXTHeadCondMov]
 
 let Predicates = [HasVendorXTHeadMac] in {
-def : Pat<(add GPR:$rd, (mul GPR:$rs1, GPR:$rs2)), (TH_MULA GPR:$rd, GPR:$rs1, GPR:$rs2)>;
-def : Pat<(sub GPR:$rd, (mul GPR:$rs1, GPR:$rs2)), (TH_MULS GPR:$rd, GPR:$rs1, GPR:$rs2)>;
+def : Pat<(add GPR:$rd, (mul (XLenVT GPR:$rs1), (XLenVT GPR:$rs2))),
+          (TH_MULA GPR:$rd, GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sub GPR:$rd, (mul (XLenVT GPR:$rs1), (XLenVT GPR:$rs2))),
+          (TH_MULS GPR:$rd, GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasVendorXTHeadMac]
 
 let Predicates = [HasVendorXTHeadMac, IsRV64] in {
@@ -661,19 +663,21 @@ def : Pat<(binop_allwusers<sub> GPR:$rd, (mul GPR:$rs1, GPR:$rs2)),
           (TH_MULSW GPR:$rd, GPR:$rs1, GPR:$rs2)>;
 // mulah, mulsh produce a sign-extended result.
 def : Pat<(binop_allwusers<add> GPR:$rd, (mul
-            (sexti16 GPR:$rs1),
-            (sexti16 GPR:$rs2))),
+            (sexti16 (i64 GPR:$rs1)),
+            (sexti16 (i64 GPR:$rs2)))),
           (TH_MULAH GPR:$rd, GPR:$rs1, GPR:$rs2)>;
 def : Pat<(binop_allwusers<sub> GPR:$rd, (mul
-            (sexti16 GPR:$rs1),
-            (sexti16 GPR:$rs2))),
+            (sexti16 (i64 GPR:$rs1)),
+            (sexti16 (i64 GPR:$rs2)))),
           (TH_MULSH GPR:$rd, GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasVendorXTHeadMac, IsRV64]
 
 let Predicates = [HasVendorXTHeadMac, IsRV32] in {
-def : Pat<(i32 (add GPR:$rd, (mul (sexti16 GPR:$rs1), (sexti16 GPR:$rs2)))),
+def : Pat<(i32 (add GPR:$rd, (mul (sexti16 (i32 GPR:$rs1)),
+                                  (sexti16 (i32 GPR:$rs2))))),
           (TH_MULAH GPR:$rd, GPR:$rs1, GPR:$rs2)>;
-def : Pat<(i32 (sub GPR:$rd, (mul (sexti16 GPR:$rs1), (sexti16 GPR:$rs2)))),
+def : Pat<(i32 (sub GPR:$rd, (mul (sexti16 (i32 GPR:$rs1)),
+                                  (sexti16 (i32 GPR:$rs2))))),
           (TH_MULSH GPR:$rd, GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasVendorXTHeadMac, IsRV32]
 
@@ -760,26 +764,26 @@ def AddrRegZextRegScale
                      [], [], 10>;
 
 multiclass LdIdxPat<PatFrag LoadOp, RVInst Inst, ValueType vt = XLenVT> {
-def : Pat<(vt (LoadOp (AddrRegRegScale GPR:$rs1, GPR:$rs2, uimm2:$uimm2))),
+def : Pat<(vt (LoadOp (AddrRegRegScale (XLenVT GPR:$rs1), (XLenVT GPR:$rs2), uimm2:$uimm2))),
           (Inst GPR:$rs1, GPR:$rs2, uimm2:$uimm2)>;
 }
 
 multiclass LdZextIdxPat<PatFrag LoadOp, RVInst Inst, ValueType vt = i64> {
-def : Pat<(vt (LoadOp (AddrRegZextRegScale (i64 GPR:$rs1), GPR:$rs2, uimm2:$uimm2))),
+def : Pat<(vt (LoadOp (AddrRegZextRegScale (i64 GPR:$rs1), (i64 GPR:$rs2), uimm2:$uimm2))),
           (Inst GPR:$rs1, GPR:$rs2, uimm2:$uimm2)>;
 }
 
 multiclass StIdxPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy,
                     ValueType vt = XLenVT> {
 def : Pat<(StoreOp (vt StTy:$rd),
-            (AddrRegRegScale GPR:$rs1, GPR:$rs2, uimm2:$uimm2)),
+            (AddrRegRegScale (XLenVT GPR:$rs1), (XLenVT GPR:$rs2), uimm2:$uimm2)),
           (Inst StTy:$rd, GPR:$rs1, GPR:$rs2, uimm2:$uimm2)>;
 }
 
 multiclass StZextIdxPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy,
                         ValueType vt = i64> {
 def : Pat<(StoreOp (vt StTy:$rd),
-            (AddrRegZextRegScale (i64 GPR:$rs1), GPR:$rs2, uimm2:$uimm2)),
+            (AddrRegZextRegScale (i64 GPR:$rs1), (i64 GPR:$rs2), uimm2:$uimm2)),
           (Inst StTy:$rd, GPR:$rs1, GPR:$rs2, uimm2:$uimm2)>;
 }
 

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td
index 247f6b49e6a99..07cf086979f22 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td
@@ -30,33 +30,33 @@ def VT_MASKCN : VTMaskedMove<0b111, "vt.maskcn">,
 
 let Predicates = [IsRV64, HasVendorXVentanaCondOps] in {
 // Directly use MASKC/MASKCN in case of any of the operands being 0.
-def : Pat<(select GPR:$rc, GPR:$rs1, (i64 0)),
+def : Pat<(select (i64 GPR:$rc), GPR:$rs1, (i64 0)),
           (VT_MASKC GPR:$rs1, GPR:$rc)>;
-def : Pat<(select GPR:$rc, (i64 0), GPR:$rs1),
+def : Pat<(select (i64 GPR:$rc), (i64 0), GPR:$rs1),
           (VT_MASKCN GPR:$rs1, GPR:$rc)>;
 
-def : Pat<(select (riscv_setne GPR:$rc), GPR:$rs1, (i64 0)),
+def : Pat<(select (riscv_setne (i64 GPR:$rc)), GPR:$rs1, (i64 0)),
           (VT_MASKC GPR:$rs1, GPR:$rc)>;
-def : Pat<(select (riscv_seteq GPR:$rc), GPR:$rs1, (i64 0)),
+def : Pat<(select (riscv_seteq (i64 GPR:$rc)), GPR:$rs1, (i64 0)),
           (VT_MASKCN GPR:$rs1, GPR:$rc)>;
-def : Pat<(select (riscv_setne GPR:$rc), (i64 0), GPR:$rs1),
+def : Pat<(select (riscv_setne (i64 GPR:$rc)), (i64 0), GPR:$rs1),
           (VT_MASKCN GPR:$rs1, GPR:$rc)>;
-def : Pat<(select (riscv_seteq GPR:$rc), (i64 0), GPR:$rs1),
+def : Pat<(select (riscv_seteq (i64 GPR:$rc)), (i64 0), GPR:$rs1),
           (VT_MASKC GPR:$rs1, GPR:$rc)>;
 
 // Conditional AND operation patterns.
-def : Pat<(i64 (select GPR:$rc, (and GPR:$rs1, GPR:$rs2), GPR:$rs1)),
+def : Pat<(i64 (select (i64 GPR:$rc), (and GPR:$rs1, GPR:$rs2), GPR:$rs1)),
           (OR (AND $rs1, $rs2), (VT_MASKCN $rs1, $rc))>;
-def : Pat<(i64 (select GPR:$rc, GPR:$rs1, (and GPR:$rs1, GPR:$rs2))),
+def : Pat<(i64 (select (i64 GPR:$rc), GPR:$rs1, (and GPR:$rs1, GPR:$rs2))),
           (OR (AND $rs1, $rs2), (VT_MASKC $rs1, $rc))>;
 
 // Basic select pattern that selects between 2 registers.
-def : Pat<(i64 (select GPR:$rc, GPR:$rs1, GPR:$rs2)),
+def : Pat<(i64 (select (i64 GPR:$rc), GPR:$rs1, GPR:$rs2)),
           (OR (VT_MASKC $rs1, $rc), (VT_MASKCN $rs2, $rc))>;
 
-def : Pat<(i64 (select (riscv_setne GPR:$rc), GPR:$rs1, GPR:$rs2)),
+def : Pat<(i64 (select (riscv_setne (i64 GPR:$rc)), GPR:$rs1, GPR:$rs2)),
           (OR (VT_MASKC GPR:$rs1, GPR:$rc), (VT_MASKCN GPR:$rs2, GPR:$rc))>;
-def : Pat<(i64 (select (riscv_seteq GPR:$rc), GPR:$rs2, GPR:$rs1)),
+def : Pat<(i64 (select (riscv_seteq (i64 GPR:$rc)), GPR:$rs2, GPR:$rs1)),
           (OR (VT_MASKC GPR:$rs1, GPR:$rc), (VT_MASKCN GPR:$rs2, GPR:$rc))>;
 
 } // Predicates = [IsRV64, HasVendorXVentanaCondOps]

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index c3cdb0d5376de..a19faf776bf8d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -502,9 +502,9 @@ def : InstAlias<"bext $rd, $rs1, $shamt",
 //===----------------------------------------------------------------------===//
 
 let Predicates = [HasStdExtZbbOrZbkb] in {
-def : Pat<(and GPR:$rs1, (not GPR:$rs2)), (ANDN GPR:$rs1, GPR:$rs2)>;
-def : Pat<(or  GPR:$rs1, (not GPR:$rs2)), (ORN  GPR:$rs1, GPR:$rs2)>;
-def : Pat<(xor GPR:$rs1, (not GPR:$rs2)), (XNOR GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (and GPR:$rs1, (not GPR:$rs2))), (ANDN GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (or  GPR:$rs1, (not GPR:$rs2))), (ORN  GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (xor GPR:$rs1, (not GPR:$rs2))), (XNOR GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZbbOrZbkb]
 
 let Predicates = [HasStdExtZbbOrZbkb] in {
@@ -514,7 +514,7 @@ def : PatGprGpr<shiftop<rotr>, ROR>;
 def : PatGprImm<rotr, RORI, uimmlog2xlen>;
 // There's no encoding for roli in the the 'B' extension as it can be
 // implemented with rori by negating the immediate.
-def : Pat<(rotl GPR:$rs1, uimmlog2xlen:$shamt),
+def : Pat<(XLenVT (rotl GPR:$rs1, uimmlog2xlen:$shamt)),
           (RORI GPR:$rs1, (ImmSubFromXLen uimmlog2xlen:$shamt))>;
 } // Predicates = [HasStdExtZbbOrZbkb]
 
@@ -527,48 +527,49 @@ def : Pat<(riscv_rolw GPR:$rs1, uimm5:$rs2),
 } // Predicates = [HasStdExtZbbOrZbkb, IsRV64]
 
 let Predicates = [HasStdExtZbs] in {
-def : Pat<(and (not (shiftop<shl> 1, GPR:$rs2)), GPR:$rs1),
+def : Pat<(XLenVT (and (not (shiftop<shl> 1, (XLenVT GPR:$rs2))), GPR:$rs1)),
           (BCLR GPR:$rs1, GPR:$rs2)>;
-def : Pat<(and (rotl -2, GPR:$rs2), GPR:$rs1), (BCLR GPR:$rs1, GPR:$rs2)>;
-def : Pat<(or (shiftop<shl> 1, GPR:$rs2), GPR:$rs1),
+def : Pat<(XLenVT (and (rotl -2, (XLenVT GPR:$rs2)), GPR:$rs1)),
+          (BCLR GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (or (shiftop<shl> 1, (XLenVT GPR:$rs2)), GPR:$rs1)),
           (BSET GPR:$rs1, GPR:$rs2)>;
-def : Pat<(xor (shiftop<shl> 1, GPR:$rs2), GPR:$rs1),
+def : Pat<(XLenVT (xor (shiftop<shl> 1, (XLenVT GPR:$rs2)), GPR:$rs1)),
           (BINV GPR:$rs1, GPR:$rs2)>;
-def : Pat<(and (shiftop<srl> GPR:$rs1, GPR:$rs2), 1),
+def : Pat<(XLenVT (and (shiftop<srl> GPR:$rs1, (XLenVT GPR:$rs2)), 1)),
           (BEXT GPR:$rs1, GPR:$rs2)>;
 
-def : Pat<(shiftop<shl> 1, GPR:$rs2),
-          (BSET X0, GPR:$rs2)>;
+def : Pat<(XLenVT (shiftop<shl> 1, (XLenVT GPR:$rs2))),
+          (BSET (XLenVT X0), GPR:$rs2)>;
 
-def : Pat<(and GPR:$rs1, BCLRMask:$mask),
+def : Pat<(XLenVT (and GPR:$rs1, BCLRMask:$mask)),
           (BCLRI GPR:$rs1, BCLRMask:$mask)>;
-def : Pat<(or GPR:$rs1, SingleBitSetMask:$mask),
+def : Pat<(XLenVT (or GPR:$rs1, SingleBitSetMask:$mask)),
           (BSETI GPR:$rs1, SingleBitSetMask:$mask)>;
-def : Pat<(xor GPR:$rs1, SingleBitSetMask:$mask),
+def : Pat<(XLenVT (xor GPR:$rs1, SingleBitSetMask:$mask)),
           (BINVI GPR:$rs1, SingleBitSetMask:$mask)>;
 
-def : Pat<(and (srl GPR:$rs1, uimmlog2xlen:$shamt), (XLenVT 1)),
+def : Pat<(XLenVT (and (srl GPR:$rs1, uimmlog2xlen:$shamt), (XLenVT 1))),
           (BEXTI GPR:$rs1, uimmlog2xlen:$shamt)>;
 
-def : Pat<(seteq (and GPR:$rs1, SingleBitSetMask:$mask), 0),
+def : Pat<(XLenVT (seteq (XLenVT (and GPR:$rs1, SingleBitSetMask:$mask)), 0)),
           (BEXTI (XORI GPR:$rs1, -1), SingleBitSetMask:$mask)>;
 
-def : Pat<(or GPR:$r, BSETINVTwoBitsMask:$i),
+def : Pat<(XLenVT (or GPR:$r, BSETINVTwoBitsMask:$i)),
           (BSETI (BSETI GPR:$r, (TrailingZeros BSETINVTwoBitsMask:$i)),
                  (BSETINVTwoBitsMaskHigh BSETINVTwoBitsMask:$i))>;
-def : Pat<(xor GPR:$r, BSETINVTwoBitsMask:$i),
+def : Pat<(XLenVT (xor GPR:$r, BSETINVTwoBitsMask:$i)),
           (BINVI (BINVI GPR:$r, (TrailingZeros BSETINVTwoBitsMask:$i)),
                  (BSETINVTwoBitsMaskHigh BSETINVTwoBitsMask:$i))>;
-def : Pat<(or GPR:$r, BSETINVORIMask:$i),
+def : Pat<(XLenVT (or GPR:$r, BSETINVORIMask:$i)),
           (BSETI (ORI GPR:$r, (BSETINVORIMaskLow BSETINVORIMask:$i)),
                  (BSETINVTwoBitsMaskHigh BSETINVORIMask:$i))>;
-def : Pat<(xor GPR:$r, BSETINVORIMask:$i),
+def : Pat<(XLenVT (xor GPR:$r, BSETINVORIMask:$i)),
           (BINVI (XORI GPR:$r, (BSETINVORIMaskLow BSETINVORIMask:$i)),
                  (BSETINVTwoBitsMaskHigh BSETINVORIMask:$i))>;
-def : Pat<(and GPR:$r, BCLRITwoBitsMask:$i),
+def : Pat<(XLenVT (and GPR:$r, BCLRITwoBitsMask:$i)),
           (BCLRI (BCLRI GPR:$r, (BCLRITwoBitsMaskLow BCLRITwoBitsMask:$i)),
                  (BCLRITwoBitsMaskHigh BCLRITwoBitsMask:$i))>;
-def : Pat<(and GPR:$r, BCLRIANDIMask:$i),
+def : Pat<(XLenVT (and GPR:$r, BCLRIANDIMask:$i)),
           (BCLRI (ANDI GPR:$r, (BCLRIANDIMaskLow BCLRIANDIMask:$i)),
                  (BCLRITwoBitsMaskHigh BCLRIANDIMask:$i))>;
 } // Predicates = [HasStdExtZbs]
@@ -597,12 +598,12 @@ def : PatGpr<riscv_ctzw, CTZW>;
 def : Pat<(i64 (ctpop (i64 (zexti32 (i64 GPR:$rs1))))), (CPOPW GPR:$rs1)>;
 
 def : Pat<(i64 (riscv_absw GPR:$rs1)),
-          (MAX GPR:$rs1, (SUBW X0, GPR:$rs1))>;
+          (MAX GPR:$rs1, (SUBW (XLenVT X0), GPR:$rs1))>;
 } // Predicates = [HasStdExtZbb, IsRV64]
 
 let Predicates = [HasStdExtZbb] in {
-def : Pat<(sext_inreg GPR:$rs1, i8), (SEXT_B GPR:$rs1)>;
-def : Pat<(sext_inreg GPR:$rs1, i16), (SEXT_H GPR:$rs1)>;
+def : Pat<(XLenVT (sext_inreg GPR:$rs1, i8)), (SEXT_B GPR:$rs1)>;
+def : Pat<(XLenVT (sext_inreg GPR:$rs1, i16)), (SEXT_H GPR:$rs1)>;
 } // Predicates = [HasStdExtZbb]
 
 let Predicates = [HasStdExtZbb] in {
@@ -620,29 +621,29 @@ def : PatGpr<bswap, REV8_RV64, i64>;
 
 let Predicates = [HasStdExtZbkb] in {
 def : Pat<(or (and (shl GPR:$rs2, (XLenVT 8)), 0xFFFF),
-              (zexti8 GPR:$rs1)),
+              (zexti8 (XLenVT GPR:$rs1))),
           (PACKH GPR:$rs1, GPR:$rs2)>;
-def : Pat<(or (shl (zexti8 GPR:$rs2), (XLenVT 8)),
-              (zexti8 GPR:$rs1)),
+def : Pat<(or (shl (zexti8 (XLenVT GPR:$rs2)), (XLenVT 8)),
+              (zexti8 (XLenVT GPR:$rs1))),
           (PACKH GPR:$rs1, GPR:$rs2)>;
 def : Pat<(and (or (shl GPR:$rs2, (XLenVT 8)),
-                   (zexti8 GPR:$rs1)), 0xFFFF),
+                   (zexti8 (XLenVT GPR:$rs1))), 0xFFFF),
           (PACKH GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZbkb]
 
 let Predicates = [HasStdExtZbkb, IsRV32] in
-def : Pat<(i32 (or (zexti16 GPR:$rs1), (shl GPR:$rs2, (i32 16)))),
+def : Pat<(i32 (or (zexti16 (i32 GPR:$rs1)), (shl GPR:$rs2, (i32 16)))),
           (PACK GPR:$rs1, GPR:$rs2)>;
 
 let Predicates = [HasStdExtZbkb, IsRV64] in {
-def : Pat<(i64 (or (zexti32 GPR:$rs1), (shl GPR:$rs2, (i64 32)))),
+def : Pat<(i64 (or (zexti32 (i64 GPR:$rs1)), (shl GPR:$rs2, (i64 32)))),
           (PACK GPR:$rs1, GPR:$rs2)>;
 
 def : Pat<(binop_allwusers<or> (shl GPR:$rs2, (i64 16)),
-                               (zexti16 GPR:$rs1)),
+                               (zexti16 (i64 GPR:$rs1))),
           (PACKW GPR:$rs1, GPR:$rs2)>;
 def : Pat<(i64 (or (sext_inreg (shl GPR:$rs2, (i64 16)), i32),
-                   (zexti16 GPR:$rs1))),
+                   (zexti16 (i64 GPR:$rs1)))),
           (PACKW GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZbkb, IsRV64]
 
@@ -686,20 +687,20 @@ def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 40)), GPR:$rs2),
 def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 72)), GPR:$rs2),
           (SH3ADD (SH3ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
 
-def : Pat<(add GPR:$r, CSImm12MulBy4:$i),
-          (SH2ADD (ADDI X0, (SimmShiftRightBy2XForm CSImm12MulBy4:$i)),
+def : Pat<(add (XLenVT GPR:$r), CSImm12MulBy4:$i),
+          (SH2ADD (ADDI (XLenVT X0), (SimmShiftRightBy2XForm CSImm12MulBy4:$i)),
                   GPR:$r)>;
-def : Pat<(add GPR:$r, CSImm12MulBy8:$i),
-          (SH3ADD (ADDI X0, (SimmShiftRightBy3XForm CSImm12MulBy8:$i)),
+def : Pat<(add (XLenVT GPR:$r), CSImm12MulBy8:$i),
+          (SH3ADD (ADDI (XLenVT X0), (SimmShiftRightBy3XForm CSImm12MulBy8:$i)),
                   GPR:$r)>;
 
-def : Pat<(mul GPR:$r, C3LeftShift:$i),
+def : Pat<(mul (XLenVT GPR:$r), C3LeftShift:$i),
           (SLLI (SH1ADD GPR:$r, GPR:$r),
                 (TrailingZeros C3LeftShift:$i))>;
-def : Pat<(mul GPR:$r, C5LeftShift:$i),
+def : Pat<(mul (XLenVT GPR:$r), C5LeftShift:$i),
           (SLLI (SH2ADD GPR:$r, GPR:$r),
                 (TrailingZeros C5LeftShift:$i))>;
-def : Pat<(mul GPR:$r, C9LeftShift:$i),
+def : Pat<(mul (XLenVT GPR:$r), C9LeftShift:$i),
           (SLLI (SH3ADD GPR:$r, GPR:$r),
                 (TrailingZeros C9LeftShift:$i))>;
 
@@ -738,7 +739,7 @@ def : Pat<(i64 (and GPR:$rs1, Shifted32OnesMask:$mask)),
 
 def : Pat<(i64 (add (and GPR:$rs1, 0xFFFFFFFF), non_imm12:$rs2)),
           (ADD_UW GPR:$rs1, GPR:$rs2)>;
-def : Pat<(i64 (and GPR:$rs, 0xFFFFFFFF)), (ADD_UW GPR:$rs, X0)>;
+def : Pat<(i64 (and GPR:$rs, 0xFFFFFFFF)), (ADD_UW GPR:$rs, (XLenVT X0))>;
 
 def : Pat<(i64 (add (shl (and GPR:$rs1, 0xFFFFFFFF), (i64 1)), non_imm12:$rs2)),
           (SH1ADD_UW GPR:$rs1, GPR:$rs2)>;

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfa.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfa.td
index 81333172c3126..9eb069b513415 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfa.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfa.td
@@ -212,10 +212,10 @@ def: Pat<(any_ffloor FPR32:$rs1), (FROUND_S FPR32:$rs1, FRM_RDN)>;
 def: Pat<(any_fceil FPR32:$rs1), (FROUND_S FPR32:$rs1, FRM_RUP)>;
 def: Pat<(any_ftrunc FPR32:$rs1), (FROUND_S FPR32:$rs1, FRM_RTZ)>;
 
-def: PatSetCC<FPR32, strict_fsetcc, SETLT, FLTQ_S>;
-def: PatSetCC<FPR32, strict_fsetcc, SETOLT, FLTQ_S>;
-def: PatSetCC<FPR32, strict_fsetcc, SETLE, FLEQ_S>;
-def: PatSetCC<FPR32, strict_fsetcc, SETOLE, FLEQ_S>;
+def: PatSetCC<FPR32, strict_fsetcc, SETLT, FLTQ_S, f32>;
+def: PatSetCC<FPR32, strict_fsetcc, SETOLT, FLTQ_S, f32>;
+def: PatSetCC<FPR32, strict_fsetcc, SETLE, FLEQ_S, f32>;
+def: PatSetCC<FPR32, strict_fsetcc, SETOLE, FLEQ_S, f32>;
 } // Predicates = [HasStdExtZfa]
 
 let Predicates = [HasStdExtZfa, HasStdExtD] in {
@@ -235,10 +235,10 @@ def: Pat<(any_ffloor FPR64:$rs1), (FROUND_D FPR64:$rs1, FRM_RDN)>;
 def: Pat<(any_fceil FPR64:$rs1), (FROUND_D FPR64:$rs1, FRM_RUP)>;
 def: Pat<(any_ftrunc FPR64:$rs1), (FROUND_D FPR64:$rs1, FRM_RTZ)>;
 
-def: PatSetCC<FPR64, strict_fsetcc, SETLT, FLTQ_D>;
-def: PatSetCC<FPR64, strict_fsetcc, SETOLT, FLTQ_D>;
-def: PatSetCC<FPR64, strict_fsetcc, SETLE, FLEQ_D>;
-def: PatSetCC<FPR64, strict_fsetcc, SETOLE, FLEQ_D>;
+def: PatSetCC<FPR64, strict_fsetcc, SETLT, FLTQ_D, f64>;
+def: PatSetCC<FPR64, strict_fsetcc, SETOLT, FLTQ_D, f64>;
+def: PatSetCC<FPR64, strict_fsetcc, SETLE, FLEQ_D, f64>;
+def: PatSetCC<FPR64, strict_fsetcc, SETOLE, FLEQ_D, f64>;
 } // Predicates = [HasStdExtZfa, HasStdExtD]
 
 let Predicates = [HasStdExtZfa, HasStdExtD, IsRV32] in {
@@ -263,8 +263,8 @@ def: Pat<(any_ffloor FPR16:$rs1), (FROUND_H FPR16:$rs1, FRM_RDN)>;
 def: Pat<(any_fceil FPR16:$rs1), (FROUND_H FPR16:$rs1, FRM_RUP)>;
 def: Pat<(any_ftrunc FPR16:$rs1), (FROUND_H FPR16:$rs1, FRM_RTZ)>;
 
-def: PatSetCC<FPR16, strict_fsetcc, SETLT, FLTQ_H>;
-def: PatSetCC<FPR16, strict_fsetcc, SETOLT, FLTQ_H>;
-def: PatSetCC<FPR16, strict_fsetcc, SETLE, FLEQ_H>;
-def: PatSetCC<FPR16, strict_fsetcc, SETOLE, FLEQ_H>;
+def: PatSetCC<FPR16, strict_fsetcc, SETLT, FLTQ_H, f16>;
+def: PatSetCC<FPR16, strict_fsetcc, SETOLT, FLTQ_H, f16>;
+def: PatSetCC<FPR16, strict_fsetcc, SETLE, FLEQ_H, f16>;
+def: PatSetCC<FPR16, strict_fsetcc, SETOLE, FLEQ_H, f16>;
 } // Predicates = [HasStdExtZfa, HasStdExtZfh]

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
index 5d02043c8c19d..4f6f6ca18b744 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
@@ -356,59 +356,59 @@ foreach Ext = ZfhExts in {
 
 // Match non-signaling FEQ_D
 foreach Ext = ZfhExts in {
-  defm : PatSetCC_m<any_fsetcc,    SETEQ,  FEQ_H,            Ext>;
-  defm : PatSetCC_m<any_fsetcc,    SETOEQ, FEQ_H,            Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETLT,  PseudoQuietFLT_H, Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETOLT, PseudoQuietFLT_H, Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETLE,  PseudoQuietFLE_H, Ext>;
-  defm : PatSetCC_m<strict_fsetcc, SETOLE, PseudoQuietFLE_H, Ext>;
+  defm : PatSetCC_m<any_fsetcc,    SETEQ,  FEQ_H,            Ext, f16>;
+  defm : PatSetCC_m<any_fsetcc,    SETOEQ, FEQ_H,            Ext, f16>;
+  defm : PatSetCC_m<strict_fsetcc, SETLT,  PseudoQuietFLT_H, Ext, f16>;
+  defm : PatSetCC_m<strict_fsetcc, SETOLT, PseudoQuietFLT_H, Ext, f16>;
+  defm : PatSetCC_m<strict_fsetcc, SETLE,  PseudoQuietFLE_H, Ext, f16>;
+  defm : PatSetCC_m<strict_fsetcc, SETOLE, PseudoQuietFLE_H, Ext, f16>;
 }
 
 let Predicates = [HasStdExtZfh] in {
 // Match signaling FEQ_H
-def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs2, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR16:$rs1, FPR16:$rs2, SETEQ)),
           (AND (FLE_H $rs1, $rs2),
                (FLE_H $rs2, $rs1))>;
-def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs2, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR16:$rs1, FPR16:$rs2, SETOEQ)),
           (AND (FLE_H $rs1, $rs2),
                (FLE_H $rs2, $rs1))>;
 // If both operands are the same, use a single FLE.
-def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs1, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR16:$rs1, FPR16:$rs1, SETEQ)),
           (FLE_H $rs1, $rs1)>;
-def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs1, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR16:$rs1, FPR16:$rs1, SETOEQ)),
           (FLE_H $rs1, $rs1)>;
 } // Predicates = [HasStdExtZfh]
 
 let Predicates = [HasStdExtZhinx] in {
 // Match signaling FEQ_H
-def : Pat<(strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs2, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs2, SETEQ)),
           (AND (FLE_H_INX $rs1, $rs2),
                (FLE_H_INX $rs2, $rs1))>;
-def : Pat<(strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs2, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs2, SETOEQ)),
           (AND (FLE_H_INX $rs1, $rs2),
                (FLE_H_INX $rs2, $rs1))>;
 // If both operands are the same, use a single FLE.
-def : Pat<(strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs1, SETEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs1, SETEQ)),
           (FLE_H_INX $rs1, $rs1)>;
-def : Pat<(strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs1, SETOEQ),
+def : Pat<(XLenVT (strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs1, SETOEQ)),
           (FLE_H_INX $rs1, $rs1)>;
 } // Predicates = [HasStdExtZhinx]
 
 foreach Ext = ZfhExts in {
-  defm : PatSetCC_m<any_fsetccs, SETLT,  FLT_H, Ext>;
-  defm : PatSetCC_m<any_fsetccs, SETOLT, FLT_H, Ext>;
-  defm : PatSetCC_m<any_fsetccs, SETLE,  FLE_H, Ext>;
-  defm : PatSetCC_m<any_fsetccs, SETOLE, FLE_H, Ext>;
+  defm : PatSetCC_m<any_fsetccs, SETLT,  FLT_H, Ext, f16>;
+  defm : PatSetCC_m<any_fsetccs, SETOLT, FLT_H, Ext, f16>;
+  defm : PatSetCC_m<any_fsetccs, SETLE,  FLE_H, Ext, f16>;
+  defm : PatSetCC_m<any_fsetccs, SETOLE, FLE_H, Ext, f16>;
 }
 
 let Predicates = [HasStdExtZfh] in {
-defm Select_FPR16 : SelectCC_GPR_rrirr<FPR16>;
+defm Select_FPR16 : SelectCC_GPR_rrirr<FPR16, f16>;
 
 def PseudoFROUND_H : PseudoFROUND<FPR16>;
 } // Predicates = [HasStdExtZfh]
 
 let Predicates = [HasStdExtZhinx] in {
-defm Select_FPR16INX : SelectCC_GPR_rrirr<FPR16INX>;
+defm Select_FPR16INX : SelectCC_GPR_rrirr<FPR16INX, f16>;
 
 def PseudoFROUND_H_INX : PseudoFROUND<FPR16INX>;
 } // Predicates = [HasStdExtZhinx]

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZicond.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZicond.td
index 1dfde82bac118..42b77be680427 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZicond.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZicond.td
@@ -30,32 +30,32 @@ def CZERO_NEZ : ALU_rr<0b0000111, 0b111, "czero.nez">,
 
 let Predicates = [HasStdExtZicond] in {
 // Directly use CZERO_EQZ/CZERO_NEZ in case of any of the operands being 0.
-def : Pat<(select GPR:$rc, GPR:$rs1, 0),
+def : Pat<(XLenVT (select (XLenVT GPR:$rc), GPR:$rs1, 0)),
           (CZERO_EQZ GPR:$rs1, GPR:$rc)>;
-def : Pat<(select GPR:$rc, 0, GPR:$rs1),
+def : Pat<(XLenVT (select (XLenVT GPR:$rc), 0, GPR:$rs1)),
           (CZERO_NEZ GPR:$rs1, GPR:$rc)>;
 
-def : Pat<(select (riscv_setne GPR:$rc), GPR:$rs1, 0),
+def : Pat<(XLenVT (select (riscv_setne (XLenVT GPR:$rc)), GPR:$rs1, 0)),
           (CZERO_EQZ GPR:$rs1, GPR:$rc)>;
-def : Pat<(select (riscv_seteq GPR:$rc), GPR:$rs1, 0),
+def : Pat<(XLenVT (select (riscv_seteq (XLenVT GPR:$rc)), GPR:$rs1, 0)),
           (CZERO_NEZ GPR:$rs1, GPR:$rc)>;
-def : Pat<(select (riscv_setne GPR:$rc), 0, GPR:$rs1),
+def : Pat<(XLenVT (select (riscv_setne (XLenVT GPR:$rc)), 0, GPR:$rs1)),
           (CZERO_NEZ GPR:$rs1, GPR:$rc)>;
-def : Pat<(select (riscv_seteq GPR:$rc), 0, GPR:$rs1),
+def : Pat<(XLenVT (select (riscv_seteq (XLenVT GPR:$rc)), 0, GPR:$rs1)),
           (CZERO_EQZ GPR:$rs1, GPR:$rc)>;
 
 // Conditional AND operation patterns.
-def : Pat<(select GPR:$rc, (and GPR:$rs1, GPR:$rs2), GPR:$rs1),
+def : Pat<(XLenVT (select (XLenVT GPR:$rc), (and GPR:$rs1, GPR:$rs2), GPR:$rs1)),
           (OR (AND $rs1, $rs2), (CZERO_NEZ $rs1, $rc))>;
-def : Pat<(select GPR:$rc, GPR:$rs1, (and GPR:$rs1, GPR:$rs2)),
+def : Pat<(XLenVT (select (XLenVT GPR:$rc), GPR:$rs1, (and GPR:$rs1, GPR:$rs2))),
           (OR (AND $rs1, $rs2), (CZERO_EQZ $rs1, $rc))>;
 
 // Basic select pattern that selects between 2 registers.
-def : Pat<(select GPR:$rc, GPR:$rs1, GPR:$rs2),
+def : Pat<(XLenVT (select (XLenVT GPR:$rc), GPR:$rs1, GPR:$rs2)),
           (OR (CZERO_EQZ $rs1, $rc), (CZERO_NEZ $rs2, $rc))>;
 
-def : Pat<(select (riscv_setne GPR:$rc), GPR:$rs1, GPR:$rs2),
+def : Pat<(XLenVT (select (riscv_setne (XLenVT GPR:$rc)), GPR:$rs1, GPR:$rs2)),
           (OR (CZERO_EQZ GPR:$rs1, GPR:$rc), (CZERO_NEZ GPR:$rs2, GPR:$rc))>;
-def : Pat<(select (riscv_seteq GPR:$rc), GPR:$rs2, GPR:$rs1),
+def : Pat<(XLenVT (select (riscv_seteq (XLenVT GPR:$rc)), GPR:$rs2, GPR:$rs1)),
           (OR (CZERO_EQZ GPR:$rs1, GPR:$rc), (CZERO_NEZ GPR:$rs2, GPR:$rc))>;
 } // Predicates = [HasStdExtZicond]

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
index e7278bf392aec..c5502171e5bf9 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
@@ -134,7 +134,7 @@ def SM3P1 : RVKUnary<0b000100001001, 0b001, "sm3p1">;
 //===----------------------------------------------------------------------===//
 
 class PatGprGprByteSelect<SDPatternOperator OpNode, RVInst Inst>
-    : Pat<(OpNode GPR:$rs1, GPR:$rs2, byteselect:$imm),
+    : Pat<(XLenVT (OpNode (XLenVT GPR:$rs1), (XLenVT GPR:$rs2), byteselect:$imm)),
           (Inst GPR:$rs1, GPR:$rs2, byteselect:$imm)>;
 
 // Zknd

diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 120b9191cc58b..3d006cefe8bb2 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -115,13 +115,16 @@ let RegAltNameIndices = [ABIRegAltName] in {
 
 def XLenVT : ValueTypeByHwMode<[RV32, RV64],
                                [i32,  i64]>;
+// Floating point class with XLen bits.
+def XLenFVT : ValueTypeByHwMode<[RV32, RV64],
+                                [f32,  f64]>;
 def XLenRI : RegInfoByHwMode<
       [RV32,              RV64],
       [RegInfo<32,32,32>, RegInfo<64,64,64>]>;
 
 // The order of registers represents the preferred allocation sequence.
 // Registers are listed in the order caller-save, callee-save, specials.
-def GPR : RegisterClass<"RISCV", [XLenVT], 32, (add
+def GPR : RegisterClass<"RISCV", [XLenVT, XLenFVT], 32, (add
     (sequence "X%u", 10, 17),
     (sequence "X%u", 5, 7),
     (sequence "X%u", 28, 31),
@@ -132,15 +135,15 @@ def GPR : RegisterClass<"RISCV", [XLenVT], 32, (add
   let RegInfos = XLenRI;
 }
 
-def GPRX0 : RegisterClass<"RISCV", [XLenVT], 32, (add X0)> {
+def GPRX0 : RegisterClass<"RISCV", [XLenVT, XLenFVT], 32, (add X0)> {
   let RegInfos = XLenRI;
 }
 
-def GPRNoX0 : RegisterClass<"RISCV", [XLenVT], 32, (sub GPR, X0)> {
+def GPRNoX0 : RegisterClass<"RISCV", [XLenVT, XLenFVT], 32, (sub GPR, X0)> {
   let RegInfos = XLenRI;
 }
 
-def GPRNoX0X2 : RegisterClass<"RISCV", [XLenVT], 32, (sub GPR, X0, X2)> {
+def GPRNoX0X2 : RegisterClass<"RISCV", [XLenVT, XLenFVT], 32, (sub GPR, X0, X2)> {
   let RegInfos = XLenRI;
 }
 
@@ -148,11 +151,11 @@ def GPRNoX0X2 : RegisterClass<"RISCV", [XLenVT], 32, (sub GPR, X0, X2)> {
 // stack on some microarchitectures. Also remove the reserved registers X0, X2,
 // X3, and X4 as it reduces the number of register classes that get synthesized
 // by tablegen.
-def GPRJALR : RegisterClass<"RISCV", [XLenVT], 32, (sub GPR, (sequence "X%u", 0, 5))> {
+def GPRJALR : RegisterClass<"RISCV", [XLenVT, XLenFVT], 32, (sub GPR, (sequence "X%u", 0, 5))> {
   let RegInfos = XLenRI;
 }
 
-def GPRC : RegisterClass<"RISCV", [XLenVT], 32, (add
+def GPRC : RegisterClass<"RISCV", [XLenVT, XLenFVT], 32, (add
     (sequence "X%u", 10, 15),
     (sequence "X%u", 8, 9)
   )> {
@@ -163,7 +166,7 @@ def GPRC : RegisterClass<"RISCV", [XLenVT], 32, (add
 // restored to the saved value before the tail call, which would clobber a call
 // address. We shouldn't use x5 since that is a hint for to pop the return
 // address stack on some microarchitectures.
-def GPRTC : RegisterClass<"RISCV", [XLenVT], 32, (add
+def GPRTC : RegisterClass<"RISCV", [XLenVT, XLenFVT], 32, (add
     (sequence "X%u", 6, 7),
     (sequence "X%u", 10, 17),
     (sequence "X%u", 28, 31)
@@ -171,12 +174,12 @@ def GPRTC : RegisterClass<"RISCV", [XLenVT], 32, (add
   let RegInfos = XLenRI;
 }
 
-def SP : RegisterClass<"RISCV", [XLenVT], 32, (add X2)> {
+def SP : RegisterClass<"RISCV", [XLenVT, XLenFVT], 32, (add X2)> {
   let RegInfos = XLenRI;
 }
 
 // Saved Registers from s0 to s7, for C.MVA01S07 instruction in Zcmp extension
-def SR07 : RegisterClass<"RISCV", [XLenVT], 32, (add
+def SR07 : RegisterClass<"RISCV", [XLenVT, XLenFVT], 32, (add
     (sequence "X%u", 8, 9),
     (sequence "X%u", 18, 23)
   )> {
@@ -542,7 +545,6 @@ def VMV0 : RegisterClass<"RISCV", VMaskVTs, 64, (add V0)> {
 let RegInfos = XLenRI in {
 def GPRF16  : RegisterClass<"RISCV", [f16], 16, (add GPR)>;
 def GPRF32  : RegisterClass<"RISCV", [f32], 32, (add GPR)>;
-def GPRF64  : RegisterClass<"RISCV", [f64], 64, (add GPR)>;
 } // RegInfos = XLenRI
 
 let RegAltNameIndices = [ABIRegAltName] in {


        


More information about the llvm-commits mailing list