[llvm] 9ab9c1a - [RISCV] Generate Xqcilsm multi-word load/store instructions for three or more words (#174789)

via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 7 23:23:37 PST 2026


Author: Sudharsan Veeravalli
Date: 2026-01-08T12:53:33+05:30
New Revision: 9ab9c1a1e2fb70eba5e45b2018e6f3d62e3f8a14

URL: https://github.com/llvm/llvm-project/commit/9ab9c1a1e2fb70eba5e45b2018e6f3d62e3f8a14
DIFF: https://github.com/llvm/llvm-project/commit/9ab9c1a1e2fb70eba5e45b2018e6f3d62e3f8a14.diff

LOG: [RISCV] Generate Xqcilsm multi-word load/store instructions for three or more words (#174789)

This patch adds support for generating the `Xqcilsm` multi-word
load/store instructions for three or more words. We add a new function
in the `RISCVLoadStoreOptimizer` pass for doing this separate from the
one that does load store pairing. The reason for this is that the
implementation currently only looks for consecutive loads and stores to
merge, whereas the pairing logic has no such restriction. We also only
traverse the basic block top down for now while looking for instructions
to merge.

Added: 
    llvm/test/CodeGen/RISCV/xqcilsm-lwmi-swmi-multiple.mir

Modified: 
    llvm/lib/Target/RISCV/RISCVLoadStoreOptimizer.cpp
    llvm/test/CodeGen/RISCV/xqcilsm-memset.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVLoadStoreOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVLoadStoreOptimizer.cpp
index 63b50913ce74b..e5c0812931e66 100644
--- a/llvm/lib/Target/RISCV/RISCVLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVLoadStoreOptimizer.cpp
@@ -26,6 +26,7 @@
 
 #include "RISCV.h"
 #include "RISCVTargetMachine.h"
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/CodeGen/Passes.h"
@@ -73,6 +74,7 @@ struct RISCVLoadStoreOpt : public MachineFunctionPass {
   bool tryConvertToXqcilsmLdStPair(MachineFunction *MF,
                                    MachineBasicBlock::iterator First,
                                    MachineBasicBlock::iterator Second);
+  bool tryConvertToXqcilsmMultiLdSt(MachineBasicBlock::iterator &First);
   bool tryConvertToMIPSLdStPair(MachineFunction *MF,
                                 MachineBasicBlock::iterator First,
                                 MachineBasicBlock::iterator Second);
@@ -99,6 +101,7 @@ struct RISCVLoadStoreOpt : public MachineFunctionPass {
   MachineRegisterInfo *MRI;
   const RISCVInstrInfo *TII;
   const RISCVRegisterInfo *TRI;
+  const RISCVSubtarget *STI = nullptr;
   LiveRegUnits ModifiedRegUnits, UsedRegUnits;
 };
 } // end anonymous namespace
@@ -110,17 +113,17 @@ INITIALIZE_PASS(RISCVLoadStoreOpt, DEBUG_TYPE, RISCV_LOAD_STORE_OPT_NAME, false,
 bool RISCVLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
   if (skipFunction(Fn.getFunction()))
     return false;
-  const RISCVSubtarget &Subtarget = Fn.getSubtarget<RISCVSubtarget>();
 
   bool MadeChange = false;
-  TII = Subtarget.getInstrInfo();
-  TRI = Subtarget.getRegisterInfo();
+  STI = &Fn.getSubtarget<RISCVSubtarget>();
+  TII = STI->getInstrInfo();
+  TRI = STI->getRegisterInfo();
   MRI = &Fn.getRegInfo();
   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
   ModifiedRegUnits.init(*TRI);
   UsedRegUnits.init(*TRI);
 
-  if (Subtarget.useMIPSLoadStorePairs() || Subtarget.hasVendorXqcilsm()) {
+  if (STI->useMIPSLoadStorePairs() || STI->hasVendorXqcilsm()) {
     for (MachineBasicBlock &MBB : Fn) {
       LLVM_DEBUG(dbgs() << "MBB: " << MBB.getName() << "\n");
 
@@ -135,7 +138,7 @@ bool RISCVLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
     }
   }
 
-  if (!Subtarget.is64Bit() && Subtarget.hasStdExtZilsd()) {
+  if (!STI->is64Bit() && STI->hasStdExtZilsd()) {
     for (auto &MBB : Fn) {
       for (auto MBBI = MBB.begin(), E = MBB.end(); MBBI != E;) {
         if (fixInvalidRegPairOp(MBB, MBBI)) {
@@ -163,6 +166,12 @@ bool RISCVLoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
   if (!TII->isLdStSafeToPair(MI, TRI))
     return false;
 
+  // If Xqcilsm is available, first try to form a multi-instruction group (>2).
+  if (!STI->is64Bit() && STI->hasVendorXqcilsm()) {
+    if (tryConvertToXqcilsmMultiLdSt(MBBI))
+      return true;
+  }
+
   // Look ahead for a pairable instruction.
   MachineBasicBlock::iterator E = MI.getParent()->end();
   bool MergeForward;
@@ -174,6 +183,201 @@ bool RISCVLoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
   return false;
 }
 
+static bool isMemOpAligned(MachineInstr &MI, Align RequiredAlignment) {
+  const MachineMemOperand *MMO = *MI.memoperands_begin();
+  Align MMOAlign = MMO->getAlign();
+  return MMOAlign >= RequiredAlignment;
+}
+
+// Convert set of 3 or more LW/SW instructions to QC_LWMI/QC_SWMI/QC_SETWMI.
+// For now this only handles consecutive loads and stores traversing the basic
+// block top-down.
+// TODO: Traverse the basic block bottom-up as well.
+bool RISCVLoadStoreOpt::tryConvertToXqcilsmMultiLdSt(
+    MachineBasicBlock::iterator &FirstIt) {
+  MachineInstr &FirstMI = *FirstIt;
+  MachineFunction *MF = FirstMI.getMF();
+
+  if (STI->is64Bit() || !STI->hasVendorXqcilsm())
+    return false;
+
+  unsigned Opc = FirstMI.getOpcode();
+  if (Opc != RISCV::LW && Opc != RISCV::SW)
+    return false;
+
+  if (!FirstMI.hasOneMemOperand())
+    return false;
+
+  if (!isMemOpAligned(FirstMI, Align(4)))
+    return false;
+
+  // Require simple reg+imm addressing.
+  const MachineOperand &BaseOp = FirstMI.getOperand(1);
+  const MachineOperand &OffOp = FirstMI.getOperand(2);
+  if (!BaseOp.isReg() || !OffOp.isImm())
+    return false;
+
+  Register Base = BaseOp.getReg();
+  int64_t BaseOff = OffOp.getImm();
+
+  if (!isShiftedUInt<5, 2>(BaseOff))
+    return false;
+
+  Register StartReg = FirstMI.getOperand(0).getReg();
+  bool IsLoad = (Opc == RISCV::LW);
+
+  // Load rd cannot be x0 and must not clobber the base register.
+  if (IsLoad) {
+    if (StartReg == RISCV::X0)
+      return false;
+    if (StartReg == Base)
+      return false;
+  }
+
+  // Collect a set of consecutive matching instructions.
+  SmallVector<MachineInstr *, 8> Group;
+  Group.push_back(&FirstMI);
+
+  MachineBasicBlock::iterator E = FirstIt->getParent()->end();
+  MachineBasicBlock::iterator It = next_nodbg(FirstIt, E);
+  int64_t ExpectedOff = BaseOff + 4;
+  unsigned Index = 1;
+  enum class StoreMode { Unknown, Setwmi, Swmi };
+  StoreMode SMode = StoreMode::Unknown;
+
+  while (It != E) {
+    MachineInstr &MI = *It;
+
+    if (!TII->isPairableLdStInstOpc(MI.getOpcode()))
+      break;
+    if (MI.getOpcode() != Opc)
+      break;
+    if (!TII->isLdStSafeToPair(MI, TRI))
+      break;
+    if (!MI.hasOneMemOperand())
+      break;
+    if (!isMemOpAligned(MI, Align(4)))
+      break;
+
+    const MachineOperand &BaseMIOp = MI.getOperand(1);
+    const MachineOperand &OffsetMIOp = MI.getOperand(2);
+    if (!BaseMIOp.isReg() || !OffsetMIOp.isImm())
+      break;
+    if (BaseMIOp.getReg() != Base)
+      break;
+    int64_t Off = OffsetMIOp.getImm();
+    if (Off != ExpectedOff)
+      break;
+
+    Register Reg = MI.getOperand(0).getReg();
+    if (IsLoad) {
+      // For loads, require consecutive destination registers.
+      if (Reg != StartReg + Index)
+        break;
+      if (Reg == Base)
+        break;
+    } else {
+      // For stores, decide mode based on the second instruction and then
+      // enforce the same for the rest.
+      if (SMode == StoreMode::Unknown) {
+        if (Reg == StartReg)
+          SMode = StoreMode::Setwmi;
+        else if (Reg == StartReg + 1)
+          SMode = StoreMode::Swmi;
+        else
+          break;
+      } else if (SMode == StoreMode::Setwmi) {
+        if (Reg != StartReg)
+          break;
+      } else {
+        if (Reg != StartReg + Index)
+          break;
+      }
+    }
+
+    // Passed checks, extend the group.
+    Group.push_back(&MI);
+    ++Index;
+    ExpectedOff += 4;
+    It = next_nodbg(It, E);
+  }
+
+  // We only handle more than 2 here. Pairs are handled in
+  // tryConvertToXqcilsmLdStPair.
+  unsigned Len = Group.size();
+  if (Len < 3 || Len > 31)
+    return false;
+
+  unsigned NewOpc;
+  unsigned StartRegState;
+  bool AddImplicitRegs = true;
+
+  if (IsLoad) {
+    NewOpc = RISCV::QC_LWMI;
+    StartRegState = static_cast<unsigned>(RegState::Define);
+  } else {
+    assert(SMode != StoreMode::Unknown &&
+           "Group should be large enough to know the store mode");
+    if (SMode == StoreMode::Setwmi) {
+      NewOpc = RISCV::QC_SETWMI;
+      // Kill if any of the individual stores killed the reg.
+      bool StartKill = false;
+      for (MachineInstr *MI : Group)
+        StartKill |= MI->getOperand(0).isKill();
+      StartRegState = getKillRegState(StartKill);
+      AddImplicitRegs = false;
+    } else {
+      // SWMI requires consecutive source regs and rd != x0.
+      if (StartReg == RISCV::X0)
+        return false;
+      NewOpc = RISCV::QC_SWMI;
+      StartRegState = getKillRegState(Group.front()->getOperand(0).isKill());
+    }
+  }
+
+  // Aggregate kill on base.
+  bool BaseKill = false;
+  for (MachineInstr *MI : Group)
+    BaseKill |= MI->getOperand(1).isKill();
+
+  // Build the new instruction.
+  DebugLoc DL = FirstMI.getDebugLoc();
+  if (!DL)
+    DL = Group.back()->getDebugLoc();
+  MachineInstrBuilder MIB = BuildMI(*MF, DL, TII->get(NewOpc));
+  MIB.addReg(StartReg, StartRegState)
+      .addReg(Base, getKillRegState(BaseKill))
+      .addImm(Len)
+      .addImm(BaseOff);
+
+  // Merge memory references.
+  MIB.cloneMergedMemRefs(Group);
+
+  if (AddImplicitRegs) {
+    // Add implicit operands for the additional registers.
+    for (unsigned i = 1; i < Len; ++i) {
+      Register R = StartReg + i;
+      unsigned State = 0;
+      if (IsLoad)
+        State = static_cast<unsigned>(RegState::ImplicitDefine);
+      else
+        State = RegState::Implicit |
+                getKillRegState(Group[i]->getOperand(0).isKill());
+      MIB.addReg(R, State);
+    }
+  }
+
+  // Insert before the first instruction and remove all in the group.
+  MachineBasicBlock *MBB = FirstIt->getParent();
+  MachineBasicBlock::iterator NewIt = MBB->insert(FirstIt, MIB);
+  for (MachineInstr *MI : Group)
+    MI->removeFromParent();
+
+  // Advance the cursor to the next non-debug instruction after the group.
+  FirstIt = next_nodbg(NewIt, MBB->end());
+  return true;
+}
+
 bool RISCVLoadStoreOpt::tryConvertToXqcilsmLdStPair(
     MachineFunction *MF, MachineBasicBlock::iterator First,
     MachineBasicBlock::iterator Second) {
@@ -197,10 +401,10 @@ bool RISCVLoadStoreOpt::tryConvertToXqcilsmLdStPair(
   if (Base1 != Base2)
     return false;
 
-  const MachineMemOperand *MMO = *First->memoperands_begin();
-  Align MMOAlign = MMO->getAlign();
+  if (!First->hasOneMemOperand() || !Second->hasOneMemOperand())
+    return false;
 
-  if (MMOAlign < Align(4))
+  if (!isMemOpAligned(*First, Align(4)) || !isMemOpAligned(*Second, Align(4)))
     return false;
 
   auto &FirstOp0 = First->getOperand(0);
@@ -303,10 +507,10 @@ bool RISCVLoadStoreOpt::tryConvertToMIPSLdStPair(
     break;
   }
 
-  const MachineMemOperand *MMO = *First->memoperands_begin();
-  Align MMOAlign = MMO->getAlign();
+  if (!First->hasOneMemOperand())
+    return false;
 
-  if (MMOAlign < RequiredAlignment)
+  if (!isMemOpAligned(*First, RequiredAlignment))
     return false;
 
   int64_t Offset = First->getOperand(2).getImm();
@@ -338,10 +542,9 @@ bool RISCVLoadStoreOpt::tryConvertToMIPSLdStPair(
 bool RISCVLoadStoreOpt::tryConvertToLdStPair(
     MachineBasicBlock::iterator First, MachineBasicBlock::iterator Second) {
   MachineFunction *MF = First->getMF();
-  const RISCVSubtarget &STI = MF->getSubtarget<RISCVSubtarget>();
 
   // Try converting to QC_LWMI/QC_SWMI if the XQCILSM extension is enabled.
-  if (!STI.is64Bit() && STI.hasVendorXqcilsm())
+  if (!STI->is64Bit() && STI->hasVendorXqcilsm())
     return tryConvertToXqcilsmLdStPair(MF, First, Second);
 
   // Else try to convert them into MIPS Paired Loads/Stores.
@@ -538,13 +741,10 @@ RISCVLoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
     First = InsertionPoint;
   }
 
-  MachineFunction *MF = I->getMF();
-  const RISCVSubtarget &STI = MF->getSubtarget<RISCVSubtarget>();
-
   if (tryConvertToLdStPair(First, Second)) {
     LLVM_DEBUG(dbgs() << "Pairing load/store:\n    ");
     LLVM_DEBUG(prev_nodbg(NextI, MBB.begin())->print(dbgs()));
-  } else if (!STI.is64Bit() && STI.hasVendorXqcilsm()) {
+  } else if (!STI->is64Bit() && STI->hasVendorXqcilsm()) {
     // We were unable to form the pair, so use the next non-debug instruction
     // after the first instruction we had wanted to merge.
     NextI = next_nodbg(I, E);

diff  --git a/llvm/test/CodeGen/RISCV/xqcilsm-lwmi-swmi-multiple.mir b/llvm/test/CodeGen/RISCV/xqcilsm-lwmi-swmi-multiple.mir
new file mode 100644
index 0000000000000..f7206e8ebb7dc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/xqcilsm-lwmi-swmi-multiple.mir
@@ -0,0 +1,188 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
+# RUN: llc -mtriple=riscv32 -mattr=+xqcilsm -run-pass=riscv-load-store-opt %s -o - | FileCheck %s
+
+--- |
+
+  define void @lwmi() { ret void }
+  define void @lwmi_x0() { ret void }
+  define void @lwmi_nonconsecutive() { ret void }
+  define void @lwmi_misaligned() { ret void }
+  define void @swmi() { ret void }
+  define void @swmi_x0() { ret void }
+  define void @swmi_nonconsecutive() { ret void }
+  define void @swmi_misaligned() { ret void}
+
+  define void @setwmi(ptr %a, ptr %b, ptr %c, ptr %d) {
+    entry:
+      store i32 0, ptr %a, align 4
+      store i32 0, ptr %b, align 4
+      store i32 0, ptr %c, align 4
+      store i32 0, ptr %d, align 4
+      ret void
+  }
+
+...
+---
+name:            lwmi
+body:             |
+  bb.0:
+    liveins: $x10
+    ; CHECK-LABEL: name: lwmi
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: $x28 = QC_LWMI $x10, 4, 0, implicit-def $x29, implicit-def $x30, implicit-def $x31 :: (load (s32))
+    ; CHECK-NEXT: $x10 = ADD $x28, $x29
+    ; CHECK-NEXT: $x10 = ADD $x10, $x30
+    ; CHECK-NEXT: $x10 = ADD $x10, $x31
+    ; CHECK-NEXT: PseudoRET
+    $x28 = LW $x10, 0 :: (load (s32), align 4)
+    $x29 = LW $x10, 4 :: (load (s32), align 4)
+    $x30 = LW $x10, 8 :: (load (s32), align 4)
+    $x31 = LW $x10, 12 :: (load (s32), align 4)
+    $x10 = ADD $x28, $x29
+    $x10 = ADD $x10, $x30
+    $x10 = ADD $x10, $x31
+    PseudoRET
+...
+---
+name:            lwmi_x0
+body:             |
+  bb.0:
+    liveins: $x10
+    ; CHECK-LABEL: name: lwmi_x0
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: $x0 = LW $x10, 0 :: (load (s32))
+    ; CHECK-NEXT: $x1 = QC_LWMI killed $x10, 3, 4, implicit-def $x2, implicit-def $x3 :: (load (s32))
+    ; CHECK-NEXT: PseudoRET
+    $x0 = LW $x10, 0 :: (load (s32), align 4)
+    $x1 = LW $x10, 4 :: (load (s32), align 4)
+    $x2 = LW $x10, 8 :: (load (s32), align 4)
+    $x3 = LW killed $x10, 12 :: (load (s32), align 4)
+    PseudoRET
+...
+---
+name:            lwmi_nonconsecutive
+body:             |
+  bb.0:
+    liveins: $x10
+    ; CHECK-LABEL: name: lwmi_nonconsecutive
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: $x28 = LW $x10, 0 :: (load (s32))
+    ; CHECK-NEXT: $x30 = LW $x10, 4 :: (load (s32))
+    ; CHECK-NEXT: $x29 = LW $x10, 8 :: (load (s32))
+    ; CHECK-NEXT: $x31 = LW $x10, 12 :: (load (s32))
+    ; CHECK-NEXT: PseudoRET
+    $x28 = LW $x10, 0 :: (load (s32), align 4)
+    $x29 = LW $x10, 8 :: (load (s32), align 4)
+    $x30 = LW $x10, 4 :: (load (s32), align 4)
+    $x31 = LW $x10, 12 :: (load (s32), align 4)
+    PseudoRET
+...
+---
+name:            lwmi_misaligned
+body:             |
+  bb.0:
+    liveins: $x10
+    ; CHECK-LABEL: name: lwmi_misaligned
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: $x28 = LW $x10, 2 :: (load (s32))
+    ; CHECK-NEXT: $x29 = LW $x10, 6 :: (load (s32))
+    ; CHECK-NEXT: $x30 = LW $x10, 10 :: (load (s32))
+    ; CHECK-NEXT: PseudoRET
+    $x28 = LW $x10, 2 :: (load (s32), align 4)
+    $x29 = LW $x10, 6 :: (load (s32), align 4)
+    $x30 = LW $x10, 10 :: (load (s32), align 4)
+    PseudoRET
+...
+---
+name:            swmi
+body:             |
+  bb.0:
+    liveins: $x10
+    ; CHECK-LABEL: name: swmi
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: QC_SWMI killed $x28, $x10, 4, 0, implicit killed $x29, implicit $x30, implicit killed $x31 :: (store (s32))
+    ; CHECK-NEXT: PseudoRET
+    SW killed $x28, $x10, 0 :: (store (s32), align 4)
+    SW killed $x29, $x10, 4 :: (store (s32), align 4)
+    SW $x30, $x10, 8 :: (store (s32), align 4)
+    SW killed $x31, $x10, 12 :: (store (s32), align 4)
+    PseudoRET
+...
+---
+name:            swmi_x0
+body:             |
+  bb.0:
+    liveins: $x10
+    ; CHECK-LABEL: name: swmi_x0
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: SW $x0, $x10, 0 :: (store (s32))
+    ; CHECK-NEXT: QC_SWMI $x1, $x10, 3, 4, implicit $x2, implicit $x3 :: (store (s32))
+    ; CHECK-NEXT: PseudoRET
+    SW $x0, $x10, 0 :: (store (s32), align 4)
+    SW $x1, $x10, 4 :: (store (s32), align 4)
+    SW $x2, $x10, 8 :: (store (s32), align 4)
+    SW $x3, $x10, 12 :: (store (s32), align 4)
+    PseudoRET
+...
+---
+name:            swmi_nonconsecutive
+body:             |
+  bb.0:
+    liveins: $x10
+    ; CHECK-LABEL: name: swmi_nonconsecutive
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: QC_SWMI $x28, $x10, 2, 0, implicit killed $x29 :: (store (s32))
+    ; CHECK-NEXT: $x11 = ADDI $x28, 1
+    ; CHECK-NEXT: QC_SWMI $x30, $x10, 2, 8, implicit killed $x31 :: (store (s32))
+    ; CHECK-NEXT: $x12 = ADDI $x30, 1
+    ; CHECK-NEXT: PseudoRET
+    SW $x28, $x10, 0 :: (store (s32), align 4)
+    $x11 = ADDI $x28, 1
+    SW killed $x29, $x10, 4 :: (store (s32), align 4)
+    SW $x30, $x10, 8 :: (store (s32), align 4)
+    $x12 = ADDI $x30, 1
+    SW killed $x31, $x10, 12 :: (store (s32), align 4)
+    PseudoRET
+...
+---
+name:            swmi_misaligned
+body:             |
+  bb.0:
+    liveins: $x10
+    ; CHECK-LABEL: name: swmi_misaligned
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: SW $x0, $x10, 0 :: (store (s32))
+    ; CHECK-NEXT: SW $x1, $x10, 4 :: (store (s32))
+    ; CHECK-NEXT: SW $x2, $x10, 8 :: (store (s32), align 2)
+    ; CHECK-NEXT: SW $x3, $x10, 12 :: (store (s32))
+    ; CHECK-NEXT: PseudoRET
+    SW $x0, $x10, 0 :: (store (s32), align 4)
+    SW $x1, $x10, 4 :: (store (s32), align 4)
+    SW $x2, $x10, 8 :: (store (s32), align 2)
+    SW $x3, $x10, 12 :: (store (s32), align 4)
+    PseudoRET
+...
+---
+name:            setwmi
+body:             |
+  bb.0:
+    liveins: $x10
+    ; CHECK-LABEL: name: setwmi
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: QC_SETWMI $x0, $x10, 4, 0 :: (store (s32) into %ir.a), (store (s32) into %ir.b), (store (s32) into %ir.c), (store (s32) into %ir.d)
+    ; CHECK-NEXT: PseudoRET
+    SW $x0, $x10, 0 :: (store (s32) into %ir.a, align 4)
+    SW $x0, $x10, 4 :: (store (s32) into %ir.b, align 4)
+    SW $x0, $x10, 8 :: (store (s32) into %ir.c, align 4)
+    SW $x0, $x10, 12 :: (store (s32) into %ir.d, align 4)
+    PseudoRET
+...

diff  --git a/llvm/test/CodeGen/RISCV/xqcilsm-memset.ll b/llvm/test/CodeGen/RISCV/xqcilsm-memset.ll
index 399d6066c3366..e380122cf6efe 100644
--- a/llvm/test/CodeGen/RISCV/xqcilsm-memset.ll
+++ b/llvm/test/CodeGen/RISCV/xqcilsm-memset.ll
@@ -539,8 +539,7 @@ define void @test7a_unalign() nounwind {
 ; RV32IXQCILSM-NEXT:    addi a0, a0, %lo(arr1)
 ; RV32IXQCILSM-NEXT:    li a1, -1
 ; RV32IXQCILSM-NEXT:    sb a1, 16(a0)
-; RV32IXQCILSM-NEXT:    qc.setwmi a1, 2, 0(a0)
-; RV32IXQCILSM-NEXT:    qc.setwmi a1, 2, 8(a0)
+; RV32IXQCILSM-NEXT:    qc.setwmi a1, 4, 0(a0)
 ; RV32IXQCILSM-NEXT:    ret
 entry:
   tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 -1, i32 17, i1 false)
@@ -654,8 +653,7 @@ define void @test8() nounwind {
 ; RV32IXQCILSM:       # %bb.0: # %entry
 ; RV32IXQCILSM-NEXT:    lui a0, %hi(arr1)
 ; RV32IXQCILSM-NEXT:    addi a0, a0, %lo(arr1)
-; RV32IXQCILSM-NEXT:    qc.setwmi zero, 2, 0(a0)
-; RV32IXQCILSM-NEXT:    qc.setwmi zero, 2, 8(a0)
+; RV32IXQCILSM-NEXT:    qc.setwmi zero, 4, 0(a0)
 ; RV32IXQCILSM-NEXT:    ret
 entry:
   tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 0, i32 16, i1 false)
@@ -681,10 +679,8 @@ define void @test9() nounwind {
 ; RV32IXQCILSM:       # %bb.0: # %entry
 ; RV32IXQCILSM-NEXT:    lui a0, %hi(arr1)
 ; RV32IXQCILSM-NEXT:    addi a0, a0, %lo(arr1)
-; RV32IXQCILSM-NEXT:    qc.setwmi zero, 2, 16(a0)
-; RV32IXQCILSM-NEXT:    qc.setwmi zero, 2, 24(a0)
-; RV32IXQCILSM-NEXT:    qc.setwmi zero, 2, 0(a0)
-; RV32IXQCILSM-NEXT:    qc.setwmi zero, 2, 8(a0)
+; RV32IXQCILSM-NEXT:    qc.setwmi zero, 4, 16(a0)
+; RV32IXQCILSM-NEXT:    qc.setwmi zero, 4, 0(a0)
 ; RV32IXQCILSM-NEXT:    ret
 entry:
   tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 0, i32 32, i1 false)


        


More information about the llvm-commits mailing list