[llvm] [RISCV][WIP] Enable sink-and-fold for RISC-V. (PR #67602)
Craig Topper via llvm-commits
llvm-commits@lists.llvm.org
Wed Sep 27 13:45:04 PDT 2023
https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/67602
From fc922af83f8debc37f55bfb7d96d23ff740f89a1 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper@sifive.com>
Date: Wed, 27 Sep 2023 13:22:15 -0700
Subject: [PATCH 1/2] [RISCV] Enable sink-and-fold for RISC-V.
This uses the sink-and-fold support recently added to MachineSink
(https://reviews.llvm.org/D152828).
It enables folding an ADDI into the immediate offset of load/store addresses.
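For illustration, a minimal before/after sketch (mirroring the split-offsets.ll
update below; the register names are just the ones from that test): an ADDI whose
only use is a memory access is folded into the access's displacement.

    # before: the ADDI materializes an intermediate pointer
    addi a1, a0, 1
    sb   a2, 1(a1)

    # after: the ADDI's immediate is folded into the store's offset (1 + 1 = 2)
    sb   a2, 2(a0)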
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 71 +++
llvm/lib/Target/RISCV/RISCVInstrInfo.h | 7 +
llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp | 2 +-
llvm/lib/Target/RISCV/RISCVTargetMachine.cpp | 9 +-
.../RISCV/rvv/fixed-vectors-masked-gather.ll | 411 +++++++-----------
llvm/test/CodeGen/RISCV/split-offsets.ll | 22 +-
llvm/test/CodeGen/RISCV/srem-vector-lkk.ll | 100 ++---
llvm/test/CodeGen/RISCV/urem-vector-lkk.ll | 100 ++---
8 files changed, 363 insertions(+), 359 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 6ee5e2d4c584049..7b9f8b08002c077 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1907,6 +1907,77 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
return true;
}
+bool RISCVInstrInfo::canFoldIntoAddrMode(const MachineInstr &MemI,
+ Register Reg,
+ const MachineInstr &AddrI,
+ ExtAddrMode &AM) const {
+ switch (MemI.getOpcode()) {
+ default:
+ return false;
+ case RISCV::LB:
+ case RISCV::LBU:
+ case RISCV::LH:
+ case RISCV::LHU:
+ case RISCV::LW:
+ case RISCV::LWU:
+ case RISCV::LD:
+ case RISCV::FLH:
+ case RISCV::FLW:
+ case RISCV::FLD:
+ case RISCV::SB:
+ case RISCV::SH:
+ case RISCV::SW:
+ case RISCV::SD:
+ case RISCV::FSH:
+ case RISCV::FSW:
+ case RISCV::FSD:
+ break;
+ }
+
+ // Check that the fold operand is not the loaded/stored value.
+ const MachineOperand &BaseRegOp = MemI.getOperand(0);
+ if (BaseRegOp.isReg() && BaseRegOp.getReg() == Reg)
+ return false;
+
+ if (AddrI.getOpcode() != RISCV::ADDI)
+ return false;
+
+ int64_t OldOffset = MemI.getOperand(2).getImm();
+ int64_t Disp = AddrI.getOperand(2).getImm();
+ int64_t NewOffset = OldOffset + Disp;
+ if (!STI.is64Bit())
+ NewOffset = SignExtend64<32>(NewOffset);
+
+ if (!isInt<12>(NewOffset))
+ return false;
+
+ AM.BaseReg = AddrI.getOperand(1).getReg();
+ AM.ScaledReg = 0;
+ AM.Scale = 0;
+ AM.Displacement = NewOffset;
+ AM.Form = ExtAddrMode::Formula::Basic;
+ return true;
+}
+
+MachineInstr *RISCVInstrInfo::emitLdStWithAddr(MachineInstr &MemI,
+ const ExtAddrMode &AM) const {
+
+ const DebugLoc &DL = MemI.getDebugLoc();
+ MachineBasicBlock &MBB = *MemI.getParent();
+
+ assert(AM.ScaledReg == 0 && AM.Scale == 0 &&
+ "Addressing mode not supported for folding");
+
+ auto B = BuildMI(MBB, MemI, DL, get(MemI.getOpcode()))
+ .addReg(MemI.getOperand(0).getReg(),
+ MemI.mayLoad() ? RegState::Define : 0)
+ .addReg(AM.BaseReg)
+ .addImm(AM.Displacement)
+ .setMemRefs(MemI.memoperands())
+ .setMIFlags(MemI.getFlags());
+ return B.getInstr();
+}
+
// Return true if get the base operand, byte offset of an instruction and the
// memory width. Width is the size of memory that is being loaded/stored.
bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 99c907a98121ae3..2f8ea749b96e3d1 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -137,6 +137,13 @@ class RISCVInstrInfo : public RISCVGenInstrInfo {
bool verifyInstruction(const MachineInstr &MI,
StringRef &ErrInfo) const override;
+ bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg,
+ const MachineInstr &AddrI,
+ ExtAddrMode &AM) const override;
+
+ MachineInstr *emitLdStWithAddr(MachineInstr &MemI,
+ const ExtAddrMode &AM) const override;
+
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
const MachineOperand *&BaseOp,
int64_t &Offset, unsigned &Width,
diff --git a/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp b/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
index 7c6a89b6036fa3c..9d7660ba9a4b103 100644
--- a/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
@@ -275,7 +275,7 @@ bool RISCVInitUndef::processBasicBlock(MachineFunction &MF,
Changed |= handleSubReg(MF, MI, DLD);
if (MI.isImplicitDef()) {
auto DstReg = MI.getOperand(0).getReg();
- if (isVectorRegClass(DstReg))
+ if (DstReg.isVirtual() && isVectorRegClass(DstReg))
Changed |= handleImplicitDef(MBB, I);
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 69a0569fccc4eca..cbf9d7f3d4872a7 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -78,6 +78,11 @@ static cl::opt<bool> EnableRISCVDeadRegisterElimination(
" them with stores to x0"),
cl::init(true));
+static cl::opt<bool>
+ EnableSinkFold("riscv-enable-sink-fold",
+ cl::desc("Enable sinking and folding of instruction copies"),
+ cl::init(true), cl::Hidden);
+
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
RegisterTargetMachine<RISCVTargetMachine> X(getTheRISCV32Target());
RegisterTargetMachine<RISCVTargetMachine> Y(getTheRISCV64Target());
@@ -242,7 +247,9 @@ namespace {
class RISCVPassConfig : public TargetPassConfig {
public:
RISCVPassConfig(RISCVTargetMachine &TM, PassManagerBase &PM)
- : TargetPassConfig(TM, PM) {}
+ : TargetPassConfig(TM, PM) {
+ setEnableSinkAndFold(EnableSinkFold);
+ }
RISCVTargetMachine &getRISCVTargetMachine() const {
return getTM<RISCVTargetMachine>();
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 480e5c2f8f2b8b6..e3b06af20e5ee14 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -12972,38 +12972,39 @@ define <4 x i32> @mgather_narrow_edge_case(ptr %base) {
; RV64ZVE32F-NEXT: vmset.m v8
; RV64ZVE32F-NEXT: vmv.x.s a1, v8
; RV64ZVE32F-NEXT: # implicit-def: $v8
-; RV64ZVE32F-NEXT: bnez zero, .LBB106_2
-; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vlse32.v v8, (a0), zero
-; RV64ZVE32F-NEXT: .LBB106_2: # %else
-; RV64ZVE32F-NEXT: andi a3, a1, 2
-; RV64ZVE32F-NEXT: addi a2, a0, -512
-; RV64ZVE32F-NEXT: bnez a3, .LBB106_6
-; RV64ZVE32F-NEXT: # %bb.3: # %else2
-; RV64ZVE32F-NEXT: andi a3, a1, 4
-; RV64ZVE32F-NEXT: bnez a3, .LBB106_7
-; RV64ZVE32F-NEXT: .LBB106_4: # %else5
+; RV64ZVE32F-NEXT: beqz zero, .LBB106_5
+; RV64ZVE32F-NEXT: # %bb.1: # %else
+; RV64ZVE32F-NEXT: andi a2, a1, 2
+; RV64ZVE32F-NEXT: bnez a2, .LBB106_6
+; RV64ZVE32F-NEXT: .LBB106_2: # %else2
+; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: bnez a2, .LBB106_7
+; RV64ZVE32F-NEXT: .LBB106_3: # %else5
; RV64ZVE32F-NEXT: andi a1, a1, 8
; RV64ZVE32F-NEXT: bnez a1, .LBB106_8
-; RV64ZVE32F-NEXT: .LBB106_5: # %else8
+; RV64ZVE32F-NEXT: .LBB106_4: # %else8
; RV64ZVE32F-NEXT: ret
+; RV64ZVE32F-NEXT: .LBB106_5: # %cond.load
+; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vlse32.v v8, (a0), zero
+; RV64ZVE32F-NEXT: andi a2, a1, 2
+; RV64ZVE32F-NEXT: beqz a2, .LBB106_2
; RV64ZVE32F-NEXT: .LBB106_6: # %cond.load1
-; RV64ZVE32F-NEXT: lw a3, 0(a2)
+; RV64ZVE32F-NEXT: lw a2, -512(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma
-; RV64ZVE32F-NEXT: vmv.s.x v9, a3
+; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT: andi a3, a1, 4
-; RV64ZVE32F-NEXT: beqz a3, .LBB106_4
+; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: beqz a2, .LBB106_3
; RV64ZVE32F-NEXT: .LBB106_7: # %cond.load4
-; RV64ZVE32F-NEXT: lw a0, 0(a0)
+; RV64ZVE32F-NEXT: lw a2, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m1, tu, ma
-; RV64ZVE32F-NEXT: vmv.s.x v9, a0
+; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2
; RV64ZVE32F-NEXT: andi a1, a1, 8
-; RV64ZVE32F-NEXT: beqz a1, .LBB106_5
+; RV64ZVE32F-NEXT: beqz a1, .LBB106_4
; RV64ZVE32F-NEXT: .LBB106_8: # %cond.load7
-; RV64ZVE32F-NEXT: lw a0, 0(a2)
+; RV64ZVE32F-NEXT: lw a0, -512(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
@@ -13480,11 +13481,10 @@ define <8 x i16> @mgather_strided_unaligned(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB107_2
; RV64ZVE32F-NEXT: .LBB107_10: # %cond.load1
-; RV64ZVE32F-NEXT: addi a2, a0, 4
-; RV64ZVE32F-NEXT: lbu a3, 1(a2)
-; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: slli a3, a3, 8
-; RV64ZVE32F-NEXT: or a2, a3, a2
+; RV64ZVE32F-NEXT: lbu a2, 5(a0)
+; RV64ZVE32F-NEXT: lbu a3, 4(a0)
+; RV64ZVE32F-NEXT: slli a2, a2, 8
+; RV64ZVE32F-NEXT: or a2, a2, a3
; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma
@@ -13492,64 +13492,58 @@ define <8 x i16> @mgather_strided_unaligned(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: beqz a2, .LBB107_3
; RV64ZVE32F-NEXT: .LBB107_11: # %cond.load4
-; RV64ZVE32F-NEXT: addi a2, a0, 8
-; RV64ZVE32F-NEXT: lbu a3, 1(a2)
-; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: slli a3, a3, 8
-; RV64ZVE32F-NEXT: or a2, a3, a2
+; RV64ZVE32F-NEXT: lbu a2, 9(a0)
+; RV64ZVE32F-NEXT: lbu a3, 8(a0)
+; RV64ZVE32F-NEXT: slli a2, a2, 8
+; RV64ZVE32F-NEXT: or a2, a2, a3
; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB107_4
; RV64ZVE32F-NEXT: .LBB107_12: # %cond.load7
-; RV64ZVE32F-NEXT: addi a2, a0, 12
-; RV64ZVE32F-NEXT: lbu a3, 1(a2)
-; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: slli a3, a3, 8
-; RV64ZVE32F-NEXT: or a2, a3, a2
+; RV64ZVE32F-NEXT: lbu a2, 13(a0)
+; RV64ZVE32F-NEXT: lbu a3, 12(a0)
+; RV64ZVE32F-NEXT: slli a2, a2, 8
+; RV64ZVE32F-NEXT: or a2, a2, a3
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB107_5
; RV64ZVE32F-NEXT: .LBB107_13: # %cond.load10
-; RV64ZVE32F-NEXT: addi a2, a0, 16
-; RV64ZVE32F-NEXT: lbu a3, 1(a2)
-; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: slli a3, a3, 8
-; RV64ZVE32F-NEXT: or a2, a3, a2
+; RV64ZVE32F-NEXT: lbu a2, 17(a0)
+; RV64ZVE32F-NEXT: lbu a3, 16(a0)
+; RV64ZVE32F-NEXT: slli a2, a2, 8
+; RV64ZVE32F-NEXT: or a2, a2, a3
; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 4
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB107_6
; RV64ZVE32F-NEXT: .LBB107_14: # %cond.load13
-; RV64ZVE32F-NEXT: addi a2, a0, 20
-; RV64ZVE32F-NEXT: lbu a3, 1(a2)
-; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: slli a3, a3, 8
-; RV64ZVE32F-NEXT: or a2, a3, a2
+; RV64ZVE32F-NEXT: lbu a2, 21(a0)
+; RV64ZVE32F-NEXT: lbu a3, 20(a0)
+; RV64ZVE32F-NEXT: slli a2, a2, 8
+; RV64ZVE32F-NEXT: or a2, a2, a3
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 5
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: beqz a2, .LBB107_7
; RV64ZVE32F-NEXT: .LBB107_15: # %cond.load16
-; RV64ZVE32F-NEXT: addi a2, a0, 24
-; RV64ZVE32F-NEXT: lbu a3, 1(a2)
-; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: slli a3, a3, 8
-; RV64ZVE32F-NEXT: or a2, a3, a2
+; RV64ZVE32F-NEXT: lbu a2, 25(a0)
+; RV64ZVE32F-NEXT: lbu a3, 24(a0)
+; RV64ZVE32F-NEXT: slli a2, a2, 8
+; RV64ZVE32F-NEXT: or a2, a2, a3
; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 6
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB107_8
; RV64ZVE32F-NEXT: .LBB107_16: # %cond.load19
-; RV64ZVE32F-NEXT: addi a0, a0, 28
-; RV64ZVE32F-NEXT: lbu a1, 1(a0)
-; RV64ZVE32F-NEXT: lbu a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a1, 29(a0)
+; RV64ZVE32F-NEXT: lbu a0, 28(a0)
; RV64ZVE32F-NEXT: slli a1, a1, 8
; RV64ZVE32F-NEXT: or a0, a1, a0
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
@@ -13614,8 +13608,7 @@ define <8 x i16> @mgather_strided_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB108_2
; RV64ZVE32F-NEXT: .LBB108_10: # %cond.load1
-; RV64ZVE32F-NEXT: addi a2, a0, 2
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 2(a0)
; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma
@@ -13623,48 +13616,42 @@ define <8 x i16> @mgather_strided_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: beqz a2, .LBB108_3
; RV64ZVE32F-NEXT: .LBB108_11: # %cond.load4
-; RV64ZVE32F-NEXT: addi a2, a0, 8
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 8(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB108_4
; RV64ZVE32F-NEXT: .LBB108_12: # %cond.load7
-; RV64ZVE32F-NEXT: addi a2, a0, 10
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 10(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB108_5
; RV64ZVE32F-NEXT: .LBB108_13: # %cond.load10
-; RV64ZVE32F-NEXT: addi a2, a0, 16
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 16(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 4
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB108_6
; RV64ZVE32F-NEXT: .LBB108_14: # %cond.load13
-; RV64ZVE32F-NEXT: addi a2, a0, 18
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 18(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 5
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: beqz a2, .LBB108_7
; RV64ZVE32F-NEXT: .LBB108_15: # %cond.load16
-; RV64ZVE32F-NEXT: addi a2, a0, 24
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 24(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 6
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB108_8
; RV64ZVE32F-NEXT: .LBB108_16: # %cond.load19
-; RV64ZVE32F-NEXT: addi a0, a0, 26
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: lh a0, 26(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7
@@ -13730,8 +13717,7 @@ define <8 x i16> @mgather_strided_2xSEW_with_offset(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB109_2
; RV64ZVE32F-NEXT: .LBB109_10: # %cond.load1
-; RV64ZVE32F-NEXT: addi a2, a0, 6
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 6(a0)
; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma
@@ -13739,48 +13725,42 @@ define <8 x i16> @mgather_strided_2xSEW_with_offset(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: beqz a2, .LBB109_3
; RV64ZVE32F-NEXT: .LBB109_11: # %cond.load4
-; RV64ZVE32F-NEXT: addi a2, a0, 12
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 12(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB109_4
; RV64ZVE32F-NEXT: .LBB109_12: # %cond.load7
-; RV64ZVE32F-NEXT: addi a2, a0, 14
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 14(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB109_5
; RV64ZVE32F-NEXT: .LBB109_13: # %cond.load10
-; RV64ZVE32F-NEXT: addi a2, a0, 20
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 20(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 4
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB109_6
; RV64ZVE32F-NEXT: .LBB109_14: # %cond.load13
-; RV64ZVE32F-NEXT: addi a2, a0, 22
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 22(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 5
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: beqz a2, .LBB109_7
; RV64ZVE32F-NEXT: .LBB109_15: # %cond.load16
-; RV64ZVE32F-NEXT: addi a2, a0, 28
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 28(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 6
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB109_8
; RV64ZVE32F-NEXT: .LBB109_16: # %cond.load19
-; RV64ZVE32F-NEXT: addi a0, a0, 30
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: lh a0, 30(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7
@@ -13846,8 +13826,7 @@ define <8 x i16> @mgather_reverse_unit_strided_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB110_2
; RV64ZVE32F-NEXT: .LBB110_10: # %cond.load1
-; RV64ZVE32F-NEXT: addi a2, a0, 30
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 30(a0)
; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma
@@ -13855,48 +13834,42 @@ define <8 x i16> @mgather_reverse_unit_strided_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: beqz a2, .LBB110_3
; RV64ZVE32F-NEXT: .LBB110_11: # %cond.load4
-; RV64ZVE32F-NEXT: addi a2, a0, 24
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 24(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB110_4
; RV64ZVE32F-NEXT: .LBB110_12: # %cond.load7
-; RV64ZVE32F-NEXT: addi a2, a0, 26
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 26(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB110_5
; RV64ZVE32F-NEXT: .LBB110_13: # %cond.load10
-; RV64ZVE32F-NEXT: addi a2, a0, 20
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 20(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 4
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB110_6
; RV64ZVE32F-NEXT: .LBB110_14: # %cond.load13
-; RV64ZVE32F-NEXT: addi a2, a0, 22
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 22(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 5
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: beqz a2, .LBB110_7
; RV64ZVE32F-NEXT: .LBB110_15: # %cond.load16
-; RV64ZVE32F-NEXT: addi a2, a0, 16
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 16(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 6
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB110_8
; RV64ZVE32F-NEXT: .LBB110_16: # %cond.load19
-; RV64ZVE32F-NEXT: addi a0, a0, 18
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: lh a0, 18(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7
@@ -13962,8 +13935,7 @@ define <8 x i16> @mgather_reverse_strided_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB111_2
; RV64ZVE32F-NEXT: .LBB111_10: # %cond.load1
-; RV64ZVE32F-NEXT: addi a2, a0, 30
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 30(a0)
; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma
@@ -13971,48 +13943,42 @@ define <8 x i16> @mgather_reverse_strided_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: beqz a2, .LBB111_3
; RV64ZVE32F-NEXT: .LBB111_11: # %cond.load4
-; RV64ZVE32F-NEXT: addi a2, a0, 20
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 20(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB111_4
; RV64ZVE32F-NEXT: .LBB111_12: # %cond.load7
-; RV64ZVE32F-NEXT: addi a2, a0, 22
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 22(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB111_5
; RV64ZVE32F-NEXT: .LBB111_13: # %cond.load10
-; RV64ZVE32F-NEXT: addi a2, a0, 12
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 12(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 4
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB111_6
; RV64ZVE32F-NEXT: .LBB111_14: # %cond.load13
-; RV64ZVE32F-NEXT: addi a2, a0, 14
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 14(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 5
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: beqz a2, .LBB111_7
; RV64ZVE32F-NEXT: .LBB111_15: # %cond.load16
-; RV64ZVE32F-NEXT: addi a2, a0, 4
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 4(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 6
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB111_8
; RV64ZVE32F-NEXT: .LBB111_16: # %cond.load19
-; RV64ZVE32F-NEXT: addi a0, a0, 6
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: lh a0, 6(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7
@@ -14076,8 +14042,7 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB112_2
; RV64ZVE32F-NEXT: .LBB112_10: # %cond.load1
-; RV64ZVE32F-NEXT: addi a2, a0, 2
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 2(a0)
; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma
@@ -14085,48 +14050,42 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: beqz a2, .LBB112_3
; RV64ZVE32F-NEXT: .LBB112_11: # %cond.load4
-; RV64ZVE32F-NEXT: addi a2, a0, 16
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 16(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB112_4
; RV64ZVE32F-NEXT: .LBB112_12: # %cond.load7
-; RV64ZVE32F-NEXT: addi a2, a0, 18
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 18(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB112_5
; RV64ZVE32F-NEXT: .LBB112_13: # %cond.load10
-; RV64ZVE32F-NEXT: addi a2, a0, 8
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 8(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 4
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB112_6
; RV64ZVE32F-NEXT: .LBB112_14: # %cond.load13
-; RV64ZVE32F-NEXT: addi a2, a0, 10
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 10(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 5
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: beqz a2, .LBB112_7
; RV64ZVE32F-NEXT: .LBB112_15: # %cond.load16
-; RV64ZVE32F-NEXT: addi a2, a0, 4
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 4(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 6
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB112_8
; RV64ZVE32F-NEXT: .LBB112_16: # %cond.load19
-; RV64ZVE32F-NEXT: addi a0, a0, 6
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: lh a0, 6(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7
@@ -14193,8 +14152,7 @@ define <8 x i16> @mgather_gather_2xSEW_unaligned(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB113_2
; RV64ZVE32F-NEXT: .LBB113_10: # %cond.load1
-; RV64ZVE32F-NEXT: addi a2, a0, 2
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 2(a0)
; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma
@@ -14202,48 +14160,42 @@ define <8 x i16> @mgather_gather_2xSEW_unaligned(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: beqz a2, .LBB113_3
; RV64ZVE32F-NEXT: .LBB113_11: # %cond.load4
-; RV64ZVE32F-NEXT: addi a2, a0, 18
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 18(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB113_4
; RV64ZVE32F-NEXT: .LBB113_12: # %cond.load7
-; RV64ZVE32F-NEXT: addi a2, a0, 20
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 20(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB113_5
; RV64ZVE32F-NEXT: .LBB113_13: # %cond.load10
-; RV64ZVE32F-NEXT: addi a2, a0, 8
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 8(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 4
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB113_6
; RV64ZVE32F-NEXT: .LBB113_14: # %cond.load13
-; RV64ZVE32F-NEXT: addi a2, a0, 10
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 10(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 5
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: beqz a2, .LBB113_7
; RV64ZVE32F-NEXT: .LBB113_15: # %cond.load16
-; RV64ZVE32F-NEXT: addi a2, a0, 4
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 4(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 6
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB113_8
; RV64ZVE32F-NEXT: .LBB113_16: # %cond.load19
-; RV64ZVE32F-NEXT: addi a0, a0, 6
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: lh a0, 6(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7
@@ -14282,84 +14234,80 @@ define <8 x i16> @mgather_gather_2xSEW_unaligned2(ptr %base) {
; RV64ZVE32F-NEXT: vmset.m v8
; RV64ZVE32F-NEXT: vmv.x.s a1, v8
; RV64ZVE32F-NEXT: # implicit-def: $v8
-; RV64ZVE32F-NEXT: bnez zero, .LBB114_2
-; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT: addi a2, a0, 2
-; RV64ZVE32F-NEXT: vlse16.v v8, (a2), zero
-; RV64ZVE32F-NEXT: .LBB114_2: # %else
-; RV64ZVE32F-NEXT: andi a3, a1, 2
-; RV64ZVE32F-NEXT: addi a2, a0, 4
-; RV64ZVE32F-NEXT: bnez a3, .LBB114_10
-; RV64ZVE32F-NEXT: # %bb.3: # %else2
-; RV64ZVE32F-NEXT: andi a3, a1, 4
-; RV64ZVE32F-NEXT: bnez a3, .LBB114_11
-; RV64ZVE32F-NEXT: .LBB114_4: # %else5
-; RV64ZVE32F-NEXT: andi a3, a1, 8
-; RV64ZVE32F-NEXT: bnez a3, .LBB114_12
-; RV64ZVE32F-NEXT: .LBB114_5: # %else8
-; RV64ZVE32F-NEXT: andi a3, a1, 16
-; RV64ZVE32F-NEXT: bnez a3, .LBB114_13
-; RV64ZVE32F-NEXT: .LBB114_6: # %else11
-; RV64ZVE32F-NEXT: andi a3, a1, 32
-; RV64ZVE32F-NEXT: bnez a3, .LBB114_14
-; RV64ZVE32F-NEXT: .LBB114_7: # %else14
-; RV64ZVE32F-NEXT: andi a3, a1, 64
-; RV64ZVE32F-NEXT: bnez a3, .LBB114_15
-; RV64ZVE32F-NEXT: .LBB114_8: # %else17
+; RV64ZVE32F-NEXT: beqz zero, .LBB114_9
+; RV64ZVE32F-NEXT: # %bb.1: # %else
+; RV64ZVE32F-NEXT: andi a2, a1, 2
+; RV64ZVE32F-NEXT: bnez a2, .LBB114_10
+; RV64ZVE32F-NEXT: .LBB114_2: # %else2
+; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: bnez a2, .LBB114_11
+; RV64ZVE32F-NEXT: .LBB114_3: # %else5
+; RV64ZVE32F-NEXT: andi a2, a1, 8
+; RV64ZVE32F-NEXT: bnez a2, .LBB114_12
+; RV64ZVE32F-NEXT: .LBB114_4: # %else8
+; RV64ZVE32F-NEXT: andi a2, a1, 16
+; RV64ZVE32F-NEXT: bnez a2, .LBB114_13
+; RV64ZVE32F-NEXT: .LBB114_5: # %else11
+; RV64ZVE32F-NEXT: andi a2, a1, 32
+; RV64ZVE32F-NEXT: bnez a2, .LBB114_14
+; RV64ZVE32F-NEXT: .LBB114_6: # %else14
+; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: bnez a2, .LBB114_15
+; RV64ZVE32F-NEXT: .LBB114_7: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: bnez a1, .LBB114_16
-; RV64ZVE32F-NEXT: .LBB114_9: # %else20
+; RV64ZVE32F-NEXT: .LBB114_8: # %else20
; RV64ZVE32F-NEXT: ret
+; RV64ZVE32F-NEXT: .LBB114_9: # %cond.load
+; RV64ZVE32F-NEXT: addi a2, a0, 2
+; RV64ZVE32F-NEXT: vlse16.v v8, (a2), zero
+; RV64ZVE32F-NEXT: andi a2, a1, 2
+; RV64ZVE32F-NEXT: beqz a2, .LBB114_2
; RV64ZVE32F-NEXT: .LBB114_10: # %cond.load1
-; RV64ZVE32F-NEXT: lh a3, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 4(a0)
; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v9, a3
+; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT: andi a3, a1, 4
-; RV64ZVE32F-NEXT: beqz a3, .LBB114_4
+; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: beqz a2, .LBB114_3
; RV64ZVE32F-NEXT: .LBB114_11: # %cond.load4
-; RV64ZVE32F-NEXT: addi a3, a0, 18
-; RV64ZVE32F-NEXT: lh a3, 0(a3)
+; RV64ZVE32F-NEXT: lh a2, 18(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma
-; RV64ZVE32F-NEXT: vmv.s.x v9, a3
+; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT: andi a3, a1, 8
-; RV64ZVE32F-NEXT: beqz a3, .LBB114_5
+; RV64ZVE32F-NEXT: andi a2, a1, 8
+; RV64ZVE32F-NEXT: beqz a2, .LBB114_4
; RV64ZVE32F-NEXT: .LBB114_12: # %cond.load7
-; RV64ZVE32F-NEXT: addi a3, a0, 20
-; RV64ZVE32F-NEXT: lh a3, 0(a3)
+; RV64ZVE32F-NEXT: lh a2, 20(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma
-; RV64ZVE32F-NEXT: vmv.s.x v9, a3
+; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
-; RV64ZVE32F-NEXT: andi a3, a1, 16
-; RV64ZVE32F-NEXT: beqz a3, .LBB114_6
+; RV64ZVE32F-NEXT: andi a2, a1, 16
+; RV64ZVE32F-NEXT: beqz a2, .LBB114_5
; RV64ZVE32F-NEXT: .LBB114_13: # %cond.load10
-; RV64ZVE32F-NEXT: addi a3, a0, 8
-; RV64ZVE32F-NEXT: lh a3, 0(a3)
+; RV64ZVE32F-NEXT: lh a2, 8(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma
-; RV64ZVE32F-NEXT: vmv.s.x v9, a3
+; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 4
-; RV64ZVE32F-NEXT: andi a3, a1, 32
-; RV64ZVE32F-NEXT: beqz a3, .LBB114_7
+; RV64ZVE32F-NEXT: andi a2, a1, 32
+; RV64ZVE32F-NEXT: beqz a2, .LBB114_6
; RV64ZVE32F-NEXT: .LBB114_14: # %cond.load13
-; RV64ZVE32F-NEXT: addi a3, a0, 10
-; RV64ZVE32F-NEXT: lh a3, 0(a3)
+; RV64ZVE32F-NEXT: lh a2, 10(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
-; RV64ZVE32F-NEXT: vmv.s.x v9, a3
+; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 5
-; RV64ZVE32F-NEXT: andi a3, a1, 64
-; RV64ZVE32F-NEXT: beqz a3, .LBB114_8
+; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: beqz a2, .LBB114_7
; RV64ZVE32F-NEXT: .LBB114_15: # %cond.load16
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 4(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 6
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB114_9
+; RV64ZVE32F-NEXT: beqz a1, .LBB114_8
; RV64ZVE32F-NEXT: .LBB114_16: # %cond.load19
-; RV64ZVE32F-NEXT: addi a0, a0, 6
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: lh a0, 6(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7
@@ -14430,8 +14378,7 @@ define <8 x i16> @mgather_gather_4xSEW(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB115_2
; RV64ZVE32F-NEXT: .LBB115_10: # %cond.load1
-; RV64ZVE32F-NEXT: addi a2, a0, 2
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 2(a0)
; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma
@@ -14439,48 +14386,42 @@ define <8 x i16> @mgather_gather_4xSEW(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: beqz a2, .LBB115_3
; RV64ZVE32F-NEXT: .LBB115_11: # %cond.load4
-; RV64ZVE32F-NEXT: addi a2, a0, 4
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 4(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB115_4
; RV64ZVE32F-NEXT: .LBB115_12: # %cond.load7
-; RV64ZVE32F-NEXT: addi a2, a0, 6
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 6(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB115_5
; RV64ZVE32F-NEXT: .LBB115_13: # %cond.load10
-; RV64ZVE32F-NEXT: addi a2, a0, 16
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 16(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 4
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB115_6
; RV64ZVE32F-NEXT: .LBB115_14: # %cond.load13
-; RV64ZVE32F-NEXT: addi a2, a0, 18
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 18(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 5
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: beqz a2, .LBB115_7
; RV64ZVE32F-NEXT: .LBB115_15: # %cond.load16
-; RV64ZVE32F-NEXT: addi a2, a0, 20
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 20(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 6
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB115_8
; RV64ZVE32F-NEXT: .LBB115_16: # %cond.load19
-; RV64ZVE32F-NEXT: addi a0, a0, 22
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: lh a0, 22(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7
@@ -14548,8 +14489,7 @@ define <8 x i16> @mgather_gather_4xSEW_partial_align(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB116_2
; RV64ZVE32F-NEXT: .LBB116_10: # %cond.load1
-; RV64ZVE32F-NEXT: addi a2, a0, 2
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 2(a0)
; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma
@@ -14557,48 +14497,42 @@ define <8 x i16> @mgather_gather_4xSEW_partial_align(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: beqz a2, .LBB116_3
; RV64ZVE32F-NEXT: .LBB116_11: # %cond.load4
-; RV64ZVE32F-NEXT: addi a2, a0, 4
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 4(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB116_4
; RV64ZVE32F-NEXT: .LBB116_12: # %cond.load7
-; RV64ZVE32F-NEXT: addi a2, a0, 6
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 6(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB116_5
; RV64ZVE32F-NEXT: .LBB116_13: # %cond.load10
-; RV64ZVE32F-NEXT: addi a2, a0, 16
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 16(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 4
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB116_6
; RV64ZVE32F-NEXT: .LBB116_14: # %cond.load13
-; RV64ZVE32F-NEXT: addi a2, a0, 18
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 18(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 5
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: beqz a2, .LBB116_7
; RV64ZVE32F-NEXT: .LBB116_15: # %cond.load16
-; RV64ZVE32F-NEXT: addi a2, a0, 20
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 20(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 6
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB116_8
; RV64ZVE32F-NEXT: .LBB116_16: # %cond.load19
-; RV64ZVE32F-NEXT: addi a0, a0, 22
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: lh a0, 22(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7
@@ -14678,8 +14612,7 @@ define <8 x i16> @mgather_shuffle_rotate(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB118_2
; RV64ZVE32F-NEXT: .LBB118_10: # %cond.load1
-; RV64ZVE32F-NEXT: addi a2, a0, 10
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 10(a0)
; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma
@@ -14687,16 +14620,14 @@ define <8 x i16> @mgather_shuffle_rotate(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: beqz a2, .LBB118_3
; RV64ZVE32F-NEXT: .LBB118_11: # %cond.load4
-; RV64ZVE32F-NEXT: addi a2, a0, 12
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 12(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB118_4
; RV64ZVE32F-NEXT: .LBB118_12: # %cond.load7
-; RV64ZVE32F-NEXT: addi a2, a0, 14
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 14(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
@@ -14710,24 +14641,21 @@ define <8 x i16> @mgather_shuffle_rotate(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB118_6
; RV64ZVE32F-NEXT: .LBB118_14: # %cond.load13
-; RV64ZVE32F-NEXT: addi a2, a0, 2
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 2(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 5
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: beqz a2, .LBB118_7
; RV64ZVE32F-NEXT: .LBB118_15: # %cond.load16
-; RV64ZVE32F-NEXT: addi a2, a0, 4
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 4(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 6
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB118_8
; RV64ZVE32F-NEXT: .LBB118_16: # %cond.load19
-; RV64ZVE32F-NEXT: addi a0, a0, 6
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: lh a0, 6(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7
@@ -14795,8 +14723,7 @@ define <8 x i16> @mgather_shuffle_vrgather(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB119_2
; RV64ZVE32F-NEXT: .LBB119_10: # %cond.load1
-; RV64ZVE32F-NEXT: addi a2, a0, 4
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 4(a0)
; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma
@@ -14804,48 +14731,42 @@ define <8 x i16> @mgather_shuffle_vrgather(ptr %base) {
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: beqz a2, .LBB119_3
; RV64ZVE32F-NEXT: .LBB119_11: # %cond.load4
-; RV64ZVE32F-NEXT: addi a2, a0, 6
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 6(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB119_4
; RV64ZVE32F-NEXT: .LBB119_12: # %cond.load7
-; RV64ZVE32F-NEXT: addi a2, a0, 2
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 2(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB119_5
; RV64ZVE32F-NEXT: .LBB119_13: # %cond.load10
-; RV64ZVE32F-NEXT: addi a2, a0, 8
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 8(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 4
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB119_6
; RV64ZVE32F-NEXT: .LBB119_14: # %cond.load13
-; RV64ZVE32F-NEXT: addi a2, a0, 10
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 10(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 5
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: beqz a2, .LBB119_7
; RV64ZVE32F-NEXT: .LBB119_15: # %cond.load16
-; RV64ZVE32F-NEXT: addi a2, a0, 12
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: lh a2, 12(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 6
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB119_8
; RV64ZVE32F-NEXT: .LBB119_16: # %cond.load19
-; RV64ZVE32F-NEXT: addi a0, a0, 14
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: lh a0, 14(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7
diff --git a/llvm/test/CodeGen/RISCV/split-offsets.ll b/llvm/test/CodeGen/RISCV/split-offsets.ll
index 2ec3a5e464b8799..8d065daa2067c4a 100644
--- a/llvm/test/CodeGen/RISCV/split-offsets.ll
+++ b/llvm/test/CodeGen/RISCV/split-offsets.ll
@@ -157,23 +157,21 @@ define void @test4(ptr %dest) {
; RV32I-LABEL: test4:
; RV32I: # %bb.0:
; RV32I-NEXT: addi a0, a0, 2047
-; RV32I-NEXT: addi a1, a0, 1
-; RV32I-NEXT: li a2, 1
-; RV32I-NEXT: sb a2, 1(a0)
-; RV32I-NEXT: sb a2, 1(a1)
-; RV32I-NEXT: sb a2, 2(a1)
-; RV32I-NEXT: sb a2, 3(a1)
+; RV32I-NEXT: li a1, 1
+; RV32I-NEXT: sb a1, 1(a0)
+; RV32I-NEXT: sb a1, 2(a0)
+; RV32I-NEXT: sb a1, 3(a0)
+; RV32I-NEXT: sb a1, 4(a0)
; RV32I-NEXT: ret
;
; RV64I-LABEL: test4:
; RV64I: # %bb.0:
; RV64I-NEXT: addi a0, a0, 2047
-; RV64I-NEXT: addi a1, a0, 1
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: sb a2, 1(a0)
-; RV64I-NEXT: sb a2, 1(a1)
-; RV64I-NEXT: sb a2, 2(a1)
-; RV64I-NEXT: sb a2, 3(a1)
+; RV64I-NEXT: li a1, 1
+; RV64I-NEXT: sb a1, 1(a0)
+; RV64I-NEXT: sb a1, 2(a0)
+; RV64I-NEXT: sb a1, 3(a0)
+; RV64I-NEXT: sb a1, 4(a0)
; RV64I-NEXT: ret
%p1 = getelementptr i8, ptr %dest, i32 2048
store i8 1, ptr %p1
diff --git a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
index b5f1efa4b160ba9..231c066de543786 100644
--- a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
@@ -1085,15 +1085,15 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
; RV32I-NEXT: sw s6, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s7, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s8, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lw s1, 24(a1)
-; RV32I-NEXT: lw s2, 28(a1)
-; RV32I-NEXT: lw s3, 16(a1)
-; RV32I-NEXT: lw s4, 20(a1)
-; RV32I-NEXT: lw s5, 8(a1)
-; RV32I-NEXT: lw s6, 12(a1)
+; RV32I-NEXT: lw s0, 24(a1)
+; RV32I-NEXT: lw s1, 28(a1)
+; RV32I-NEXT: lw s2, 16(a1)
+; RV32I-NEXT: lw s3, 20(a1)
+; RV32I-NEXT: lw s4, 8(a1)
+; RV32I-NEXT: lw s5, 12(a1)
; RV32I-NEXT: lw a3, 0(a1)
; RV32I-NEXT: lw a1, 4(a1)
-; RV32I-NEXT: mv s0, a0
+; RV32I-NEXT: mv s6, a0
; RV32I-NEXT: li a2, 1
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: li a3, 0
@@ -1101,33 +1101,33 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
; RV32I-NEXT: mv s7, a0
; RV32I-NEXT: mv s8, a1
; RV32I-NEXT: li a2, 654
-; RV32I-NEXT: mv a0, s5
-; RV32I-NEXT: mv a1, s6
+; RV32I-NEXT: mv a0, s4
+; RV32I-NEXT: mv a1, s5
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __moddi3@plt
-; RV32I-NEXT: mv s5, a0
-; RV32I-NEXT: mv s6, a1
+; RV32I-NEXT: mv s4, a0
+; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: li a2, 23
-; RV32I-NEXT: mv a0, s3
-; RV32I-NEXT: mv a1, s4
+; RV32I-NEXT: mv a0, s2
+; RV32I-NEXT: mv a1, s3
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __moddi3@plt
-; RV32I-NEXT: mv s3, a0
-; RV32I-NEXT: mv s4, a1
+; RV32I-NEXT: mv s2, a0
+; RV32I-NEXT: mv s3, a1
; RV32I-NEXT: lui a0, 1
; RV32I-NEXT: addi a2, a0, 1327
-; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: mv a1, s2
+; RV32I-NEXT: mv a0, s0
+; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __moddi3@plt
-; RV32I-NEXT: sw a1, 28(s0)
-; RV32I-NEXT: sw a0, 24(s0)
-; RV32I-NEXT: sw s4, 20(s0)
-; RV32I-NEXT: sw s3, 16(s0)
-; RV32I-NEXT: sw s6, 12(s0)
-; RV32I-NEXT: sw s5, 8(s0)
-; RV32I-NEXT: sw s8, 4(s0)
-; RV32I-NEXT: sw s7, 0(s0)
+; RV32I-NEXT: sw a1, 28(s6)
+; RV32I-NEXT: sw a0, 24(s6)
+; RV32I-NEXT: sw s3, 20(s6)
+; RV32I-NEXT: sw s2, 16(s6)
+; RV32I-NEXT: sw s5, 12(s6)
+; RV32I-NEXT: sw s4, 8(s6)
+; RV32I-NEXT: sw s8, 4(s6)
+; RV32I-NEXT: sw s7, 0(s6)
; RV32I-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
@@ -1154,15 +1154,15 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
; RV32IM-NEXT: sw s6, 16(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s7, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s8, 8(sp) # 4-byte Folded Spill
-; RV32IM-NEXT: lw s1, 24(a1)
-; RV32IM-NEXT: lw s2, 28(a1)
-; RV32IM-NEXT: lw s3, 16(a1)
-; RV32IM-NEXT: lw s4, 20(a1)
-; RV32IM-NEXT: lw s5, 8(a1)
-; RV32IM-NEXT: lw s6, 12(a1)
+; RV32IM-NEXT: lw s0, 24(a1)
+; RV32IM-NEXT: lw s1, 28(a1)
+; RV32IM-NEXT: lw s2, 16(a1)
+; RV32IM-NEXT: lw s3, 20(a1)
+; RV32IM-NEXT: lw s4, 8(a1)
+; RV32IM-NEXT: lw s5, 12(a1)
; RV32IM-NEXT: lw a3, 0(a1)
; RV32IM-NEXT: lw a1, 4(a1)
-; RV32IM-NEXT: mv s0, a0
+; RV32IM-NEXT: mv s6, a0
; RV32IM-NEXT: li a2, 1
; RV32IM-NEXT: mv a0, a3
; RV32IM-NEXT: li a3, 0
@@ -1170,33 +1170,33 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
; RV32IM-NEXT: mv s7, a0
; RV32IM-NEXT: mv s8, a1
; RV32IM-NEXT: li a2, 654
-; RV32IM-NEXT: mv a0, s5
-; RV32IM-NEXT: mv a1, s6
+; RV32IM-NEXT: mv a0, s4
+; RV32IM-NEXT: mv a1, s5
; RV32IM-NEXT: li a3, 0
; RV32IM-NEXT: call __moddi3@plt
-; RV32IM-NEXT: mv s5, a0
-; RV32IM-NEXT: mv s6, a1
+; RV32IM-NEXT: mv s4, a0
+; RV32IM-NEXT: mv s5, a1
; RV32IM-NEXT: li a2, 23
-; RV32IM-NEXT: mv a0, s3
-; RV32IM-NEXT: mv a1, s4
+; RV32IM-NEXT: mv a0, s2
+; RV32IM-NEXT: mv a1, s3
; RV32IM-NEXT: li a3, 0
; RV32IM-NEXT: call __moddi3@plt
-; RV32IM-NEXT: mv s3, a0
-; RV32IM-NEXT: mv s4, a1
+; RV32IM-NEXT: mv s2, a0
+; RV32IM-NEXT: mv s3, a1
; RV32IM-NEXT: lui a0, 1
; RV32IM-NEXT: addi a2, a0, 1327
-; RV32IM-NEXT: mv a0, s1
-; RV32IM-NEXT: mv a1, s2
+; RV32IM-NEXT: mv a0, s0
+; RV32IM-NEXT: mv a1, s1
; RV32IM-NEXT: li a3, 0
; RV32IM-NEXT: call __moddi3@plt
-; RV32IM-NEXT: sw a1, 28(s0)
-; RV32IM-NEXT: sw a0, 24(s0)
-; RV32IM-NEXT: sw s4, 20(s0)
-; RV32IM-NEXT: sw s3, 16(s0)
-; RV32IM-NEXT: sw s6, 12(s0)
-; RV32IM-NEXT: sw s5, 8(s0)
-; RV32IM-NEXT: sw s8, 4(s0)
-; RV32IM-NEXT: sw s7, 0(s0)
+; RV32IM-NEXT: sw a1, 28(s6)
+; RV32IM-NEXT: sw a0, 24(s6)
+; RV32IM-NEXT: sw s3, 20(s6)
+; RV32IM-NEXT: sw s2, 16(s6)
+; RV32IM-NEXT: sw s5, 12(s6)
+; RV32IM-NEXT: sw s4, 8(s6)
+; RV32IM-NEXT: sw s8, 4(s6)
+; RV32IM-NEXT: sw s7, 0(s6)
; RV32IM-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
index a38ae17f19df385..d8f364ec8c00f5a 100644
--- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
@@ -791,15 +791,15 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
; RV32I-NEXT: sw s6, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s7, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s8, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lw s1, 24(a1)
-; RV32I-NEXT: lw s2, 28(a1)
-; RV32I-NEXT: lw s3, 16(a1)
-; RV32I-NEXT: lw s4, 20(a1)
-; RV32I-NEXT: lw s5, 8(a1)
-; RV32I-NEXT: lw s6, 12(a1)
+; RV32I-NEXT: lw s0, 24(a1)
+; RV32I-NEXT: lw s1, 28(a1)
+; RV32I-NEXT: lw s2, 16(a1)
+; RV32I-NEXT: lw s3, 20(a1)
+; RV32I-NEXT: lw s4, 8(a1)
+; RV32I-NEXT: lw s5, 12(a1)
; RV32I-NEXT: lw a3, 0(a1)
; RV32I-NEXT: lw a1, 4(a1)
-; RV32I-NEXT: mv s0, a0
+; RV32I-NEXT: mv s6, a0
; RV32I-NEXT: li a2, 1
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: li a3, 0
@@ -807,33 +807,33 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
; RV32I-NEXT: mv s7, a0
; RV32I-NEXT: mv s8, a1
; RV32I-NEXT: li a2, 654
-; RV32I-NEXT: mv a0, s5
-; RV32I-NEXT: mv a1, s6
+; RV32I-NEXT: mv a0, s4
+; RV32I-NEXT: mv a1, s5
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __umoddi3@plt
-; RV32I-NEXT: mv s5, a0
-; RV32I-NEXT: mv s6, a1
+; RV32I-NEXT: mv s4, a0
+; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: li a2, 23
-; RV32I-NEXT: mv a0, s3
-; RV32I-NEXT: mv a1, s4
+; RV32I-NEXT: mv a0, s2
+; RV32I-NEXT: mv a1, s3
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __umoddi3@plt
-; RV32I-NEXT: mv s3, a0
-; RV32I-NEXT: mv s4, a1
+; RV32I-NEXT: mv s2, a0
+; RV32I-NEXT: mv s3, a1
; RV32I-NEXT: lui a0, 1
; RV32I-NEXT: addi a2, a0, 1327
-; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: mv a1, s2
+; RV32I-NEXT: mv a0, s0
+; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __umoddi3@plt
-; RV32I-NEXT: sw a1, 28(s0)
-; RV32I-NEXT: sw a0, 24(s0)
-; RV32I-NEXT: sw s4, 20(s0)
-; RV32I-NEXT: sw s3, 16(s0)
-; RV32I-NEXT: sw s6, 12(s0)
-; RV32I-NEXT: sw s5, 8(s0)
-; RV32I-NEXT: sw s8, 4(s0)
-; RV32I-NEXT: sw s7, 0(s0)
+; RV32I-NEXT: sw a1, 28(s6)
+; RV32I-NEXT: sw a0, 24(s6)
+; RV32I-NEXT: sw s3, 20(s6)
+; RV32I-NEXT: sw s2, 16(s6)
+; RV32I-NEXT: sw s5, 12(s6)
+; RV32I-NEXT: sw s4, 8(s6)
+; RV32I-NEXT: sw s8, 4(s6)
+; RV32I-NEXT: sw s7, 0(s6)
; RV32I-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
@@ -860,15 +860,15 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
; RV32IM-NEXT: sw s6, 16(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s7, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s8, 8(sp) # 4-byte Folded Spill
-; RV32IM-NEXT: lw s1, 24(a1)
-; RV32IM-NEXT: lw s2, 28(a1)
-; RV32IM-NEXT: lw s3, 16(a1)
-; RV32IM-NEXT: lw s4, 20(a1)
-; RV32IM-NEXT: lw s5, 8(a1)
-; RV32IM-NEXT: lw s6, 12(a1)
+; RV32IM-NEXT: lw s0, 24(a1)
+; RV32IM-NEXT: lw s1, 28(a1)
+; RV32IM-NEXT: lw s2, 16(a1)
+; RV32IM-NEXT: lw s3, 20(a1)
+; RV32IM-NEXT: lw s4, 8(a1)
+; RV32IM-NEXT: lw s5, 12(a1)
; RV32IM-NEXT: lw a3, 0(a1)
; RV32IM-NEXT: lw a1, 4(a1)
-; RV32IM-NEXT: mv s0, a0
+; RV32IM-NEXT: mv s6, a0
; RV32IM-NEXT: li a2, 1
; RV32IM-NEXT: mv a0, a3
; RV32IM-NEXT: li a3, 0
@@ -876,33 +876,33 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
; RV32IM-NEXT: mv s7, a0
; RV32IM-NEXT: mv s8, a1
; RV32IM-NEXT: li a2, 654
-; RV32IM-NEXT: mv a0, s5
-; RV32IM-NEXT: mv a1, s6
+; RV32IM-NEXT: mv a0, s4
+; RV32IM-NEXT: mv a1, s5
; RV32IM-NEXT: li a3, 0
; RV32IM-NEXT: call __umoddi3@plt
-; RV32IM-NEXT: mv s5, a0
-; RV32IM-NEXT: mv s6, a1
+; RV32IM-NEXT: mv s4, a0
+; RV32IM-NEXT: mv s5, a1
; RV32IM-NEXT: li a2, 23
-; RV32IM-NEXT: mv a0, s3
-; RV32IM-NEXT: mv a1, s4
+; RV32IM-NEXT: mv a0, s2
+; RV32IM-NEXT: mv a1, s3
; RV32IM-NEXT: li a3, 0
; RV32IM-NEXT: call __umoddi3@plt
-; RV32IM-NEXT: mv s3, a0
-; RV32IM-NEXT: mv s4, a1
+; RV32IM-NEXT: mv s2, a0
+; RV32IM-NEXT: mv s3, a1
; RV32IM-NEXT: lui a0, 1
; RV32IM-NEXT: addi a2, a0, 1327
-; RV32IM-NEXT: mv a0, s1
-; RV32IM-NEXT: mv a1, s2
+; RV32IM-NEXT: mv a0, s0
+; RV32IM-NEXT: mv a1, s1
; RV32IM-NEXT: li a3, 0
; RV32IM-NEXT: call __umoddi3@plt
-; RV32IM-NEXT: sw a1, 28(s0)
-; RV32IM-NEXT: sw a0, 24(s0)
-; RV32IM-NEXT: sw s4, 20(s0)
-; RV32IM-NEXT: sw s3, 16(s0)
-; RV32IM-NEXT: sw s6, 12(s0)
-; RV32IM-NEXT: sw s5, 8(s0)
-; RV32IM-NEXT: sw s8, 4(s0)
-; RV32IM-NEXT: sw s7, 0(s0)
+; RV32IM-NEXT: sw a1, 28(s6)
+; RV32IM-NEXT: sw a0, 24(s6)
+; RV32IM-NEXT: sw s3, 20(s6)
+; RV32IM-NEXT: sw s2, 16(s6)
+; RV32IM-NEXT: sw s5, 12(s6)
+; RV32IM-NEXT: sw s4, 8(s6)
+; RV32IM-NEXT: sw s8, 4(s6)
+; RV32IM-NEXT: sw s7, 0(s6)
; RV32IM-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
From e721d9e46a8cd64afd174a7ca8395cfcee4ae92a Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper@sifive.com>
Date: Wed, 27 Sep 2023 13:44:35 -0700
Subject: [PATCH 2/2] !fixup clang-format
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 17 ++++++++---------
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 7b9f8b08002c077..244e80c6ad36fdc 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1907,8 +1907,7 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
return true;
}
-bool RISCVInstrInfo::canFoldIntoAddrMode(const MachineInstr &MemI,
- Register Reg,
+bool RISCVInstrInfo::canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg,
const MachineInstr &AddrI,
ExtAddrMode &AM) const {
switch (MemI.getOpcode()) {
@@ -1969,13 +1968,13 @@ MachineInstr *RISCVInstrInfo::emitLdStWithAddr(MachineInstr &MemI,
"Addressing mode not supported for folding");
auto B = BuildMI(MBB, MemI, DL, get(MemI.getOpcode()))
- .addReg(MemI.getOperand(0).getReg(),
- MemI.mayLoad() ? RegState::Define : 0)
- .addReg(AM.BaseReg)
- .addImm(AM.Displacement)
- .setMemRefs(MemI.memoperands())
- .setMIFlags(MemI.getFlags());
- return B.getInstr();
+ .addReg(MemI.getOperand(0).getReg(),
+ MemI.mayLoad() ? RegState::Define : 0)
+ .addReg(AM.BaseReg)
+ .addImm(AM.Displacement)
+ .setMemRefs(MemI.memoperands())
+ .setMIFlags(MemI.getFlags());
+ return B.getInstr();
}
// Return true if get the base operand, byte offset of an instruction and the