[llvm] bd1561d - [RISCV][GISel] Add manual isel for s8/s16/s32 load/store for the GPR bank. (#161995)

via llvm-commits llvm-commits at lists.llvm.org
Tue Oct 7 21:30:13 PDT 2025


Author: Craig Topper
Date: 2025-10-07T21:30:09-07:00
New Revision: bd1561d5f160a949c0bbf9bfbda3558eb62f98d7

URL: https://github.com/llvm/llvm-project/commit/bd1561d5f160a949c0bbf9bfbda3558eb62f98d7
DIFF: https://github.com/llvm/llvm-project/commit/bd1561d5f160a949c0bbf9bfbda3558eb62f98d7.diff

LOG: [RISCV][GISel] Add manual isel for s8/s16/s32 load/store for the GPR bank. (#161995)

GISel doesn't distinguish between integer and FP loads and stores; we
only know which kind we have after register bank selection. This
results in s16/s32 loads/stores on the GPR register bank that need to
be selected. Previously this required extra isel patterns not needed
for SDAG, and adding i16 and i32 to the GPR register class. Having
i16/i32 in the GPR register class makes type inference in TableGen
less effective, requiring explicit casts to be added to patterns. It
also increases the size of RISCVGenDAGISel.inc by 2K.

This patch removes the extra isel patterns and replaces them with
custom instruction selection, similar to what is done on AArch64. A
future patch will remove i16 and i32 from the GPR register class.
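
For readers who want the gist before the diff: the new selection keys
off the memory access size and the atomic ordering to pick a GPR-bank
load/store opcode. The standalone sketch below (plain C++, not LLVM
code; the function name and string mnemonics are illustrative only)
mirrors the mapping that the selectRegImmLoadStoreOp and
selectZalasrLoadStoreOp helpers in the diff implement.

// Standalone illustration of the opcode choice the new manual
// selection makes for GPR-bank loads/stores. Mnemonic strings stand
// in for the RISCV::* opcodes used in the real code.
#include <cstdio>

static const char *pickGPRLoadStoreOp(bool IsStore, bool IsOrderedAtomic,
                                      unsigned MemSizeInBits) {
  if (IsOrderedAtomic) {
    // Accesses stronger than monotonic use the Zalasr .aq/.rl forms.
    switch (MemSizeInBits) {
    case 8:  return IsStore ? "sb.rl" : "lb.aq";
    case 16: return IsStore ? "sh.rl" : "lh.aq";
    case 32: return IsStore ? "sw.rl" : "lw.aq";
    case 64: return IsStore ? "sd.rl" : "ld.aq";
    }
  } else {
    switch (MemSizeInBits) {
    case 8:  return IsStore ? "sb" : "lbu"; // unsigned load: no c.lb in Zcb
    case 16: return IsStore ? "sh" : "lh";
    case 32: return IsStore ? "sw" : "lw";
    case 64: return IsStore ? "sd" : "ld";
    }
  }
  return nullptr; // unsupported size: selection fails and falls back
}

int main() {
  // e.g. a relaxed 16-bit store and an acquire 32-bit load
  std::printf("%s\n", pickGPRLoadStoreOp(true, false, 16));  // sh
  std::printf("%s\n", pickGPRLoadStoreOp(false, true, 32));  // lw.aq
  return 0;
}

In the real patch the non-atomic path additionally folds the
reg+simm12 addressing mode (selectAddrRegImm) before rebuilding the
instruction; the sketch leaves that part out.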

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
    llvm/lib/Target/RISCV/RISCVGISel.td
    llvm/lib/Target/RISCV/RISCVInstrInfo.td
    llvm/lib/Target/RISCV/RISCVInstrInfoA.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 186fdd13e8c77..53633eac3d2c3 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -675,6 +675,45 @@ static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
   CC = getRISCVCCFromICmp(Pred);
 }
 
+/// Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation
+/// \p GenericOpc, appropriate for the GPR register bank and of memory access
+/// size \p OpSize.
+static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
+  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
+  switch (OpSize) {
+  default:
+    llvm_unreachable("Unexpected memory size");
+  case 8:
+    return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
+  case 16:
+    return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
+  case 32:
+    return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
+  case 64:
+    return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
+  }
+}
+
+/// Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation
+/// \p GenericOpc, appropriate for the GPR register bank and of memory access
+/// size \p OpSize. \returns \p GenericOpc if the combination is unsupported.
+static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
+  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
+  switch (OpSize) {
+  case 8:
+    // Prefer unsigned due to no c.lb in Zcb.
+    return IsStore ? RISCV::SB : RISCV::LBU;
+  case 16:
+    return IsStore ? RISCV::SH : RISCV::LH;
+  case 32:
+    return IsStore ? RISCV::SW : RISCV::LW;
+  case 64:
+    return IsStore ? RISCV::SD : RISCV::LD;
+  }
+
+  return GenericOpc;
+}
+
 bool RISCVInstructionSelector::select(MachineInstr &MI) {
   MachineIRBuilder MIB(MI);
 
@@ -892,6 +931,59 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
     return selectImplicitDef(MI, MIB);
   case TargetOpcode::G_UNMERGE_VALUES:
     return selectUnmergeValues(MI, MIB);
+  case TargetOpcode::G_LOAD:
+  case TargetOpcode::G_STORE: {
+    GLoadStore &LdSt = cast<GLoadStore>(MI);
+    const Register ValReg = LdSt.getReg(0);
+    const Register PtrReg = LdSt.getPointerReg();
+    LLT PtrTy = MRI->getType(PtrReg);
+
+    const RegisterBank &RB = *RBI.getRegBank(ValReg, *MRI, TRI);
+    if (RB.getID() != RISCV::GPRBRegBankID)
+      return false;
+
+#ifndef NDEBUG
+    const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, *MRI, TRI);
+    // Check that the pointer register is valid.
+    assert(PtrRB.getID() == RISCV::GPRBRegBankID &&
+           "Load/Store pointer operand isn't a GPR");
+    assert(PtrTy.isPointer() && "Load/Store pointer operand isn't a pointer");
+#endif
+
+    // Can only handle AddressSpace 0.
+    if (PtrTy.getAddressSpace() != 0)
+      return false;
+
+    unsigned MemSize = LdSt.getMemSizeInBits().getValue();
+    AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering();
+
+    if (isStrongerThanMonotonic(Order)) {
+      MI.setDesc(TII.get(selectZalasrLoadStoreOp(Opc, MemSize)));
+      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
+    }
+
+    const unsigned NewOpc = selectRegImmLoadStoreOp(MI.getOpcode(), MemSize);
+    if (NewOpc == MI.getOpcode())
+      return false;
+
+    // Check if we can fold anything into the addressing mode.
+    auto AddrModeFns = selectAddrRegImm(MI.getOperand(1));
+    if (!AddrModeFns)
+      return false;
+
+    // Folded something. Create a new instruction and return it.
+    auto NewInst = MIB.buildInstr(NewOpc, {}, {}, MI.getFlags());
+    if (isa<GStore>(MI))
+      NewInst.addUse(ValReg);
+    else
+      NewInst.addDef(ValReg);
+    NewInst.cloneMemRefs(MI);
+    for (auto &Fn : *AddrModeFns)
+      Fn(NewInst);
+    MI.eraseFromParent();
+
+    return constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI);
+  }
   default:
     return false;
   }

diff --git a/llvm/lib/Target/RISCV/RISCVGISel.td b/llvm/lib/Target/RISCV/RISCVGISel.td
index 7dd33854e9e23..eba35ef0a746d 100644
--- a/llvm/lib/Target/RISCV/RISCVGISel.td
+++ b/llvm/lib/Target/RISCV/RISCVGISel.td
@@ -100,65 +100,11 @@ def : LdPat<load, LD, PtrVT>;
 def : StPat<store, SD, GPR, PtrVT>;
 }
 
-// Load and store patterns for i16, needed because Zfh makes s16 load/store
-// legal and regbank select may not constrain registers to FP.
-def : LdPat<load, LH, i16>;
-def : StPat<store, SH, GPR, i16>;
-
-def : LdPat<extloadi8, LBU, i16>; // Prefer unsigned due to no c.lb in Zcb.
-def : StPat<truncstorei8, SB, GPR, i16>;
-
-let Predicates = [HasAtomicLdSt] in {
-  // Prefer unsigned due to no c.lb in Zcb.
-  def : LdPat<relaxed_load<atomic_load_aext_8>,    LBU, i16>;
-  def : LdPat<relaxed_load<atomic_load_nonext_16>, LH,  i16>;
-
-  def : StPat<relaxed_store<atomic_store_8>,  SB, GPR, i16>;
-  def : StPat<relaxed_store<atomic_store_16>, SH, GPR, i16>;
-}
-
-let Predicates = [HasAtomicLdSt, IsRV64] in {
-  // Load pattern is in RISCVInstrInfoA.td and shared with RV32.
-  def : StPat<relaxed_store<atomic_store_32>, SW, GPR, i32>;
-}
-
 //===----------------------------------------------------------------------===//
 // RV64 i32 patterns not used by SelectionDAG
 //===----------------------------------------------------------------------===//
 
 let Predicates = [IsRV64] in {
-def : LdPat<extloadi8, LBU, i32>; // Prefer unsigned due to no c.lb in Zcb.
-def : LdPat<extloadi16, LH, i32>;
-
-def : StPat<truncstorei8, SB, GPR, i32>;
-def : StPat<truncstorei16, SH, GPR, i32>;
-
 def : Pat<(sext_inreg (i64 (add GPR:$rs1, simm12_lo:$imm)), i32),
           (ADDIW GPR:$rs1, simm12_lo:$imm)>;
 }
-
-//===----------------------------------------------------------------------===//
-// Zalasr patterns not used by SelectionDAG
-//===----------------------------------------------------------------------===//
-
-let Predicates = [HasStdExtZalasr] in {
-  // the sequentially consistent loads use
-  //  .aq instead of .aqrl to match the psABI/A.7
-  def : PatLAQ<acquiring_load<atomic_load_aext_8>, LB_AQ, i16>;
-  def : PatLAQ<seq_cst_load<atomic_load_aext_8>, LB_AQ, i16>;
-
-  def : PatLAQ<acquiring_load<atomic_load_nonext_16>, LH_AQ, i16>;
-  def : PatLAQ<seq_cst_load<atomic_load_nonext_16>, LH_AQ, i16>;
-
-  def : PatSRL<releasing_store<atomic_store_8>, SB_RL, i16>;
-  def : PatSRL<seq_cst_store<atomic_store_8>, SB_RL, i16>;
-
-  def : PatSRL<releasing_store<atomic_store_16>, SH_RL, i16>;
-  def : PatSRL<seq_cst_store<atomic_store_16>, SH_RL, i16>;
-}
-
-let Predicates = [HasStdExtZalasr, IsRV64] in {
-  // Load pattern is in RISCVInstrInfoZalasr.td and shared with RV32.
-  def : PatSRL<releasing_store<atomic_store_32>, SW_RL, i32>;
-  def : PatSRL<seq_cst_store<atomic_store_32>, SW_RL, i32>;
-}

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 9855c47a63392..7a149290e8d36 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1980,7 +1980,7 @@ def : LdPat<sextloadi8, LB>;
 def : LdPat<extloadi8, LBU>; // Prefer unsigned due to no c.lb in Zcb.
 def : LdPat<sextloadi16, LH>;
 def : LdPat<extloadi16, LH>;
-def : LdPat<load, LW, i32>;
+def : LdPat<load, LW, i32>, Requires<[IsRV32]>;
 def : LdPat<zextloadi8, LBU>;
 def : LdPat<zextloadi16, LHU>;
 
@@ -1994,7 +1994,7 @@ class StPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy,
 
 def : StPat<truncstorei8, SB, GPR, XLenVT>;
 def : StPat<truncstorei16, SH, GPR, XLenVT>;
-def : StPat<store, SW, GPR, i32>;
+def : StPat<store, SW, GPR, i32>, Requires<[IsRV32]>;
 
 /// Fences
 

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
index 2e4326f9ed100..8c8369c50df5a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
@@ -174,8 +174,9 @@ let Predicates = [HasAtomicLdSt] in {
   def : StPat<relaxed_store<atomic_store_8>,  SB, GPR, XLenVT>;
   def : StPat<relaxed_store<atomic_store_16>, SH, GPR, XLenVT>;
   def : StPat<relaxed_store<atomic_store_32>, SW, GPR, XLenVT>;
+}
 
-  // Used by GISel for RV32 and RV64.
+let Predicates = [HasAtomicLdSt, IsRV32] in {
   def : LdPat<relaxed_load<atomic_load_nonext_32>, LW, i32>;
 }
 

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td
index f7ceb0da194ab..5f94403495546 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td
@@ -94,11 +94,12 @@ let Predicates = [HasStdExtZalasr] in {
 
   def : PatSRL<releasing_store<atomic_store_32>, SW_RL>;
   def : PatSRL<seq_cst_store<atomic_store_32>, SW_RL>;
+}
 
-  // Used by GISel for RV32 and RV64.
+let Predicates = [HasStdExtZalasr, IsRV32] in {
   def : PatLAQ<acquiring_load<atomic_load_nonext_32>, LW_AQ, i32>;
   def : PatLAQ<seq_cst_load<atomic_load_nonext_32>, LW_AQ, i32>;
-} // Predicates = [HasStdExtZalasr]
+} // Predicates = [HasStdExtZalasr, IsRV32]
 
 let Predicates = [HasStdExtZalasr, IsRV64] in {
   def : PatLAQ<acquiring_load<atomic_load_asext_32>, LW_AQ, i64>;


        

