[llvm] [RISCV][WIP] Add a rematerializable pseudo instruction for LUI+ADDI for global addresses. (PR #93320)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Fri May 24 09:42:08 PDT 2024
https://github.com/topperc created https://github.com/llvm/llvm-project/pull/93320
This is what #91432 looks like without #93129.
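
For readers skimming the patch: PseudoLIaddr stands for the usual two-instruction absolute-address materialization and is marked rematerializable, so the register allocator can recompute the address instead of spilling it. The post-RA expansion in expandLIaddr emits the pair sketched below (illustration only; `sym` and register `a0` are placeholders, not from the patch):

    # PseudoLIaddr dst, %hi(sym), %lo(sym) expands after regalloc to:
    lui  a0, %hi(sym)       # upper 20 bits of sym's absolute address
    addi a0, a0, %lo(sym)   # add the low 12 bits

When RISCVMergeBaseOffset can instead fold the %lo part and any offset into the memory access, only the LUI survives (see the foldIntoMemoryOps change), which is visible in the test diffs as `lui a1, %hi(G+72)` feeding `%lo(G+72)(a1)` addressing.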
From f736cd5fa1aba5dd35e6dd397736c33f78f6e91c Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 24 May 2024 09:40:16 -0700
Subject: [PATCH] [RISCV][WIP] Add a rematerializable pseudo instruction for
LUI+ADDI for global addresses.
This is what #91432 looks like without #93129.
---
llvm/lib/Target/RISCV/RISCVInstrInfo.td | 16 +
.../lib/Target/RISCV/RISCVMergeBaseOffset.cpp | 35 +-
.../RISCV/RISCVPostRAExpandPseudoInsts.cpp | 23 ++
llvm/test/CodeGen/RISCV/bfloat-mem.ll | 6 +-
llvm/test/CodeGen/RISCV/byval.ll | 5 +-
.../test/CodeGen/RISCV/callee-saved-fpr32s.ll | 48 ++-
.../test/CodeGen/RISCV/callee-saved-fpr64s.ll | 30 +-
llvm/test/CodeGen/RISCV/callee-saved-gprs.ll | 304 ++++++++-------
llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll | 22 +-
.../CodeGen/RISCV/ctz_zero_return_test.ll | 8 +-
llvm/test/CodeGen/RISCV/double-mem.ll | 15 +-
.../early-clobber-tied-def-subreg-liveness.ll | 14 +-
llvm/test/CodeGen/RISCV/float-mem.ll | 12 +-
.../test/CodeGen/RISCV/fold-addi-loadstore.ll | 32 +-
.../test/CodeGen/RISCV/global-merge-offset.ll | 7 +-
llvm/test/CodeGen/RISCV/global-merge.ll | 1 +
llvm/test/CodeGen/RISCV/half-mem.ll | 24 +-
.../CodeGen/RISCV/hoist-global-addr-base.ll | 3 +-
llvm/test/CodeGen/RISCV/mem.ll | 6 +-
llvm/test/CodeGen/RISCV/mem64.ll | 6 +-
llvm/test/CodeGen/RISCV/memcpy.ll | 123 +++---
llvm/test/CodeGen/RISCV/push-pop-popret.ll | 354 +++++++++---------
llvm/test/CodeGen/RISCV/rv32xtheadbb.ll | 4 +-
llvm/test/CodeGen/RISCV/rv32zbb.ll | 4 +-
.../CodeGen/RISCV/rv64-legal-i32/mem64.ll | 6 +-
.../CodeGen/RISCV/rvv/active_lane_mask.ll | 40 +-
.../CodeGen/RISCV/rvv/fixed-vectors-int.ll | 4 +-
.../rvv/fixed-vectors-interleaved-access.ll | 275 +++++++-------
.../RISCV/rvv/fixed-vectors-mask-buildvec.ll | 20 +-
.../RISCV/rvv/fixed-vectors-masked-gather.ll | 16 +-
.../rvv/fixed-vectors-shuffle-reverse.ll | 80 ++--
.../RISCV/rvv/fixed-vectors-stepvector.ll | 10 +-
.../rvv/fixed-vectors-store-merge-crash.ll | 14 +-
.../test/CodeGen/RISCV/rvv/shuffle-reverse.ll | 50 +--
llvm/test/CodeGen/RISCV/tail-calls.ll | 8 +-
llvm/test/CodeGen/RISCV/unroll-loop-cse.ll | 32 +-
.../CodeGen/RISCV/zext-with-load-is-free.ll | 22 +-
37 files changed, 900 insertions(+), 779 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 9d574edb4e6d1..8903ddc1903af 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1311,6 +1311,22 @@ def : Pat<(FrameAddrRegImm (iPTR GPR:$rs1), simm12:$imm12),
/// HI and ADD_LO address nodes.
+let Size = 8, isReMaterializable = 1 in
+def PseudoLIaddr : Pseudo<(outs GPR:$dst), (ins uimm20_lui:$hi, simm12:$lo), []>,
+ Sched<[WriteIALU]>;
+
+def LUIADDI : PatFrag<(ops node:$hi, node:$lo),
+ (riscv_add_lo (riscv_hi node:$hi), node:$lo)>;
+
+def : Pat<(LUIADDI tglobaladdr:$hi, tglobaladdr:$lo),
+ (PseudoLIaddr tglobaladdr:$hi, tglobaladdr:$lo)>;
+def : Pat<(LUIADDI tblockaddress:$hi, tblockaddress:$lo),
+ (PseudoLIaddr tblockaddress:$hi, tblockaddress:$lo)>;
+def : Pat<(LUIADDI tjumptable:$hi, tjumptable:$lo),
+ (PseudoLIaddr tjumptable:$hi, tjumptable:$lo)>;
+def : Pat<(LUIADDI tconstpool:$hi, tconstpool:$lo),
+ (PseudoLIaddr tconstpool:$hi, tconstpool:$lo)>;
+
def : Pat<(riscv_hi tglobaladdr:$in), (LUI tglobaladdr:$in)>;
def : Pat<(riscv_hi tblockaddress:$in), (LUI tblockaddress:$in)>;
def : Pat<(riscv_hi tjumptable:$in), (LUI tjumptable:$in)>;
diff --git a/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp b/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
index 410989177a8b9..1b8ad38682b55 100644
--- a/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
@@ -84,7 +84,8 @@ INITIALIZE_PASS(RISCVMergeBaseOffsetOpt, DEBUG_TYPE,
// 3) The offset value in the Global Address or Constant Pool is 0.
bool RISCVMergeBaseOffsetOpt::detectFoldable(MachineInstr &Hi,
MachineInstr *&Lo) {
- if (Hi.getOpcode() != RISCV::LUI && Hi.getOpcode() != RISCV::AUIPC)
+ if (Hi.getOpcode() != RISCV::LUI && Hi.getOpcode() != RISCV::AUIPC &&
+ Hi.getOpcode() != RISCV::PseudoLIaddr)
return false;
const MachineOperand &HiOp1 = Hi.getOperand(1);
@@ -97,16 +98,22 @@ bool RISCVMergeBaseOffsetOpt::detectFoldable(MachineInstr &Hi,
HiOp1.getOffset() != 0)
return false;
- Register HiDestReg = Hi.getOperand(0).getReg();
- if (!MRI->hasOneUse(HiDestReg))
- return false;
+ if (Hi.getOpcode() == RISCV::PseudoLIaddr) {
+    // Most of the code can handle this correctly without modification by
+    // setting both Lo and Hi to point to the PseudoLIaddr.
+ Lo = &Hi;
+ } else {
+ Register HiDestReg = Hi.getOperand(0).getReg();
+ if (!MRI->hasOneUse(HiDestReg))
+ return false;
- Lo = &*MRI->use_instr_begin(HiDestReg);
- if (Lo->getOpcode() != RISCV::ADDI)
- return false;
+ Lo = &*MRI->use_instr_begin(HiDestReg);
+ if (Lo->getOpcode() != RISCV::ADDI)
+ return false;
+ }
const MachineOperand &LoOp2 = Lo->getOperand(2);
- if (Hi.getOpcode() == RISCV::LUI) {
+ if (Hi.getOpcode() == RISCV::LUI || Hi.getOpcode() == RISCV::PseudoLIaddr) {
if (LoOp2.getTargetFlags() != RISCVII::MO_LO ||
!(LoOp2.isGlobal() || LoOp2.isCPI() || LoOp2.isBlockAddress()) ||
LoOp2.getOffset() != 0)
@@ -466,6 +473,13 @@ bool RISCVMergeBaseOffsetOpt::foldIntoMemoryOps(MachineInstr &Hi,
Hi.getOperand(1).setOffset(NewOffset);
MachineOperand &ImmOp = Lo.getOperand(2);
+  // Convert the PseudoLIaddr into a plain LUI.
+ if (Hi.getOpcode() == RISCV::PseudoLIaddr) {
+ auto *TII = ST->getInstrInfo();
+ Hi.setDesc(TII->get(RISCV::LUI));
+ Hi.removeOperand(2);
+ }
+
if (Hi.getOpcode() != RISCV::AUIPC)
ImmOp.setOffset(NewOffset);
@@ -501,6 +515,11 @@ bool RISCVMergeBaseOffsetOpt::foldIntoMemoryOps(MachineInstr &Hi,
}
}
+  // Prevent Lo (originally the PseudoLIaddr, which Hi also points to) from
+  // being erased.
+ if (&Lo == &Hi)
+ return true;
+
MRI->replaceRegWith(Lo.getOperand(0).getReg(), Hi.getOperand(0).getReg());
Lo.eraseFromParent();
return true;
diff --git a/llvm/lib/Target/RISCV/RISCVPostRAExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVPostRAExpandPseudoInsts.cpp
index 52f2ce27164d6..ce82fbea10063 100644
--- a/llvm/lib/Target/RISCV/RISCVPostRAExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVPostRAExpandPseudoInsts.cpp
@@ -44,6 +44,7 @@ class RISCVPostRAExpandPseudo : public MachineFunctionPass {
bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
MachineBasicBlock::iterator &NextMBBI);
bool expandMovImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
+ bool expandLIaddr(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
};
char RISCVPostRAExpandPseudo::ID = 0;
@@ -75,6 +76,8 @@ bool RISCVPostRAExpandPseudo::expandMI(MachineBasicBlock &MBB,
switch (MBBI->getOpcode()) {
case RISCV::PseudoMovImm:
return expandMovImm(MBB, MBBI);
+ case RISCV::PseudoLIaddr:
+ return expandLIaddr(MBB, MBBI);
default:
return false;
}
@@ -101,6 +104,26 @@ bool RISCVPostRAExpandPseudo::expandMovImm(MachineBasicBlock &MBB,
return true;
}
+bool RISCVPostRAExpandPseudo::expandLIaddr(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI) {
+ DebugLoc DL = MBBI->getDebugLoc();
+
+ Register DstReg = MBBI->getOperand(0).getReg();
+ bool DstIsDead = MBBI->getOperand(0).isDead();
+ bool Renamable = MBBI->getOperand(0).isRenamable();
+
+ BuildMI(MBB, MBBI, DL, TII->get(RISCV::LUI))
+ .addReg(DstReg, RegState::Define | getRenamableRegState(Renamable))
+ .add(MBBI->getOperand(1));
+ BuildMI(MBB, MBBI, DL, TII->get(RISCV::ADDI))
+ .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead) |
+ getRenamableRegState(Renamable))
+ .addReg(DstReg, RegState::Kill | getRenamableRegState(Renamable))
+ .add(MBBI->getOperand(2));
+ MBBI->eraseFromParent();
+ return true;
+}
+
} // end of anonymous namespace
INITIALIZE_PASS(RISCVPostRAExpandPseudo, "riscv-expand-pseudolisimm32",
diff --git a/llvm/test/CodeGen/RISCV/bfloat-mem.ll b/llvm/test/CodeGen/RISCV/bfloat-mem.ll
index 4b6c0c29d660b..41af0ef238b10 100644
--- a/llvm/test/CodeGen/RISCV/bfloat-mem.ll
+++ b/llvm/test/CodeGen/RISCV/bfloat-mem.ll
@@ -54,10 +54,10 @@ define bfloat @flh_fsh_global(bfloat %a, bfloat %b) nounwind {
; CHECK-NEXT: fcvt.bf16.s fa0, fa5
; CHECK-NEXT: lui a0, %hi(G)
; CHECK-NEXT: flh fa5, %lo(G)(a0)
-; CHECK-NEXT: addi a1, a0, %lo(G)
+; CHECK-NEXT: lui a1, %hi(G+18)
; CHECK-NEXT: fsh fa0, %lo(G)(a0)
-; CHECK-NEXT: flh fa5, 18(a1)
-; CHECK-NEXT: fsh fa0, 18(a1)
+; CHECK-NEXT: flh fa5, %lo(G+18)(a1)
+; CHECK-NEXT: fsh fa0, %lo(G+18)(a1)
; CHECK-NEXT: ret
%1 = fadd bfloat %a, %b
%2 = load volatile bfloat, ptr @G
diff --git a/llvm/test/CodeGen/RISCV/byval.ll b/llvm/test/CodeGen/RISCV/byval.ll
index 9151f3b03e7c2..a244b9265b34b 100644
--- a/llvm/test/CodeGen/RISCV/byval.ll
+++ b/llvm/test/CodeGen/RISCV/byval.ll
@@ -22,8 +22,6 @@ define void @caller() nounwind {
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: lui a0, %hi(foo)
-; RV32I-NEXT: lw a1, %lo(foo)(a0)
-; RV32I-NEXT: sw a1, 12(sp)
; RV32I-NEXT: addi a0, a0, %lo(foo)
; RV32I-NEXT: lw a1, 12(a0)
; RV32I-NEXT: sw a1, 24(sp)
@@ -31,6 +29,9 @@ define void @caller() nounwind {
; RV32I-NEXT: sw a1, 20(sp)
; RV32I-NEXT: lw a0, 4(a0)
; RV32I-NEXT: sw a0, 16(sp)
+; RV32I-NEXT: lui a0, %hi(foo)
+; RV32I-NEXT: lw a0, %lo(foo)(a0)
+; RV32I-NEXT: sw a0, 12(sp)
; RV32I-NEXT: addi a0, sp, 12
; RV32I-NEXT: call callee
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll b/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll
index 2122b3fd91788..57789f448d430 100644
--- a/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll
+++ b/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll
@@ -32,7 +32,8 @@ define void @callee() nounwind {
; ILP32-NEXT: flw fa4, %lo(var+4)(a0)
; ILP32-NEXT: flw fa3, %lo(var+8)(a0)
; ILP32-NEXT: flw fa2, %lo(var+12)(a0)
-; ILP32-NEXT: addi a1, a0, %lo(var)
+; ILP32-NEXT: lui a1, %hi(var)
+; ILP32-NEXT: addi a1, a1, %lo(var)
; ILP32-NEXT: flw fa1, 16(a1)
; ILP32-NEXT: flw fa0, 20(a1)
; ILP32-NEXT: flw ft0, 24(a1)
@@ -102,7 +103,8 @@ define void @callee() nounwind {
; ILP32E-NEXT: flw fa4, %lo(var+4)(a0)
; ILP32E-NEXT: flw fa3, %lo(var+8)(a0)
; ILP32E-NEXT: flw fa2, %lo(var+12)(a0)
-; ILP32E-NEXT: addi a1, a0, %lo(var)
+; ILP32E-NEXT: lui a1, %hi(var)
+; ILP32E-NEXT: addi a1, a1, %lo(var)
; ILP32E-NEXT: flw fa1, 16(a1)
; ILP32E-NEXT: flw fa0, 20(a1)
; ILP32E-NEXT: flw ft0, 24(a1)
@@ -172,7 +174,8 @@ define void @callee() nounwind {
; LP64-NEXT: flw fa4, %lo(var+4)(a0)
; LP64-NEXT: flw fa3, %lo(var+8)(a0)
; LP64-NEXT: flw fa2, %lo(var+12)(a0)
-; LP64-NEXT: addi a1, a0, %lo(var)
+; LP64-NEXT: lui a1, %hi(var)
+; LP64-NEXT: addi a1, a1, %lo(var)
; LP64-NEXT: flw fa1, 16(a1)
; LP64-NEXT: flw fa0, 20(a1)
; LP64-NEXT: flw ft0, 24(a1)
@@ -242,7 +245,8 @@ define void @callee() nounwind {
; LP64E-NEXT: flw fa4, %lo(var+4)(a0)
; LP64E-NEXT: flw fa3, %lo(var+8)(a0)
; LP64E-NEXT: flw fa2, %lo(var+12)(a0)
-; LP64E-NEXT: addi a1, a0, %lo(var)
+; LP64E-NEXT: lui a1, %hi(var)
+; LP64E-NEXT: addi a1, a1, %lo(var)
; LP64E-NEXT: flw fa1, 16(a1)
; LP64E-NEXT: flw fa0, 20(a1)
; LP64E-NEXT: flw ft0, 24(a1)
@@ -325,7 +329,8 @@ define void @callee() nounwind {
; ILP32F-NEXT: flw fa4, %lo(var+4)(a0)
; ILP32F-NEXT: flw fa3, %lo(var+8)(a0)
; ILP32F-NEXT: flw fa2, %lo(var+12)(a0)
-; ILP32F-NEXT: addi a1, a0, %lo(var)
+; ILP32F-NEXT: lui a1, %hi(var)
+; ILP32F-NEXT: addi a1, a1, %lo(var)
; ILP32F-NEXT: flw fa1, 16(a1)
; ILP32F-NEXT: flw fa0, 20(a1)
; ILP32F-NEXT: flw ft0, 24(a1)
@@ -421,7 +426,8 @@ define void @callee() nounwind {
; LP64F-NEXT: flw fa4, %lo(var+4)(a0)
; LP64F-NEXT: flw fa3, %lo(var+8)(a0)
; LP64F-NEXT: flw fa2, %lo(var+12)(a0)
-; LP64F-NEXT: addi a1, a0, %lo(var)
+; LP64F-NEXT: lui a1, %hi(var)
+; LP64F-NEXT: addi a1, a1, %lo(var)
; LP64F-NEXT: flw fa1, 16(a1)
; LP64F-NEXT: flw fa0, 20(a1)
; LP64F-NEXT: flw ft0, 24(a1)
@@ -517,7 +523,8 @@ define void @callee() nounwind {
; ILP32D-NEXT: flw fa4, %lo(var+4)(a0)
; ILP32D-NEXT: flw fa3, %lo(var+8)(a0)
; ILP32D-NEXT: flw fa2, %lo(var+12)(a0)
-; ILP32D-NEXT: addi a1, a0, %lo(var)
+; ILP32D-NEXT: lui a1, %hi(var)
+; ILP32D-NEXT: addi a1, a1, %lo(var)
; ILP32D-NEXT: flw fa1, 16(a1)
; ILP32D-NEXT: flw fa0, 20(a1)
; ILP32D-NEXT: flw ft0, 24(a1)
@@ -613,7 +620,8 @@ define void @callee() nounwind {
; LP64D-NEXT: flw fa4, %lo(var+4)(a0)
; LP64D-NEXT: flw fa3, %lo(var+8)(a0)
; LP64D-NEXT: flw fa2, %lo(var+12)(a0)
-; LP64D-NEXT: addi a1, a0, %lo(var)
+; LP64D-NEXT: lui a1, %hi(var)
+; LP64D-NEXT: addi a1, a1, %lo(var)
; LP64D-NEXT: flw fa1, 16(a1)
; LP64D-NEXT: flw fa0, 20(a1)
; LP64D-NEXT: flw ft0, 24(a1)
@@ -716,7 +724,8 @@ define void @caller() nounwind {
; ILP32-NEXT: fsw fa5, 120(sp) # 4-byte Folded Spill
; ILP32-NEXT: flw fa5, %lo(var+12)(s0)
; ILP32-NEXT: fsw fa5, 116(sp) # 4-byte Folded Spill
-; ILP32-NEXT: addi s1, s0, %lo(var)
+; ILP32-NEXT: lui s1, %hi(var)
+; ILP32-NEXT: addi s1, s1, %lo(var)
; ILP32-NEXT: flw fa5, 16(s1)
; ILP32-NEXT: fsw fa5, 112(sp) # 4-byte Folded Spill
; ILP32-NEXT: flw fa5, 20(s1)
@@ -859,7 +868,8 @@ define void @caller() nounwind {
; ILP32E-NEXT: fsw fa5, 116(sp) # 4-byte Folded Spill
; ILP32E-NEXT: flw fa5, %lo(var+12)(s0)
; ILP32E-NEXT: fsw fa5, 112(sp) # 4-byte Folded Spill
-; ILP32E-NEXT: addi s1, s0, %lo(var)
+; ILP32E-NEXT: lui s1, %hi(var)
+; ILP32E-NEXT: addi s1, s1, %lo(var)
; ILP32E-NEXT: flw fa5, 16(s1)
; ILP32E-NEXT: fsw fa5, 108(sp) # 4-byte Folded Spill
; ILP32E-NEXT: flw fa5, 20(s1)
@@ -1002,7 +1012,8 @@ define void @caller() nounwind {
; LP64-NEXT: fsw fa5, 124(sp) # 4-byte Folded Spill
; LP64-NEXT: flw fa5, %lo(var+12)(s0)
; LP64-NEXT: fsw fa5, 120(sp) # 4-byte Folded Spill
-; LP64-NEXT: addi s1, s0, %lo(var)
+; LP64-NEXT: lui s1, %hi(var)
+; LP64-NEXT: addi s1, s1, %lo(var)
; LP64-NEXT: flw fa5, 16(s1)
; LP64-NEXT: fsw fa5, 116(sp) # 4-byte Folded Spill
; LP64-NEXT: flw fa5, 20(s1)
@@ -1145,7 +1156,8 @@ define void @caller() nounwind {
; LP64E-NEXT: fsw fa5, 116(sp) # 4-byte Folded Spill
; LP64E-NEXT: flw fa5, %lo(var+12)(s0)
; LP64E-NEXT: fsw fa5, 112(sp) # 4-byte Folded Spill
-; LP64E-NEXT: addi s1, s0, %lo(var)
+; LP64E-NEXT: lui s1, %hi(var)
+; LP64E-NEXT: addi s1, s1, %lo(var)
; LP64E-NEXT: flw fa5, 16(s1)
; LP64E-NEXT: fsw fa5, 108(sp) # 4-byte Folded Spill
; LP64E-NEXT: flw fa5, 20(s1)
@@ -1300,7 +1312,8 @@ define void @caller() nounwind {
; ILP32F-NEXT: fsw fa5, 72(sp) # 4-byte Folded Spill
; ILP32F-NEXT: flw fa5, %lo(var+12)(s0)
; ILP32F-NEXT: fsw fa5, 68(sp) # 4-byte Folded Spill
-; ILP32F-NEXT: addi s1, s0, %lo(var)
+; ILP32F-NEXT: lui s1, %hi(var)
+; ILP32F-NEXT: addi s1, s1, %lo(var)
; ILP32F-NEXT: flw fa5, 16(s1)
; ILP32F-NEXT: fsw fa5, 64(sp) # 4-byte Folded Spill
; ILP32F-NEXT: flw fa5, 20(s1)
@@ -1443,7 +1456,8 @@ define void @caller() nounwind {
; LP64F-NEXT: fsw fa5, 76(sp) # 4-byte Folded Spill
; LP64F-NEXT: flw fa5, %lo(var+12)(s0)
; LP64F-NEXT: fsw fa5, 72(sp) # 4-byte Folded Spill
-; LP64F-NEXT: addi s1, s0, %lo(var)
+; LP64F-NEXT: lui s1, %hi(var)
+; LP64F-NEXT: addi s1, s1, %lo(var)
; LP64F-NEXT: flw fa5, 16(s1)
; LP64F-NEXT: fsw fa5, 68(sp) # 4-byte Folded Spill
; LP64F-NEXT: flw fa5, 20(s1)
@@ -1586,7 +1600,8 @@ define void @caller() nounwind {
; ILP32D-NEXT: fsw fa5, 68(sp) # 4-byte Folded Spill
; ILP32D-NEXT: flw fa5, %lo(var+12)(s0)
; ILP32D-NEXT: fsw fa5, 64(sp) # 4-byte Folded Spill
-; ILP32D-NEXT: addi s1, s0, %lo(var)
+; ILP32D-NEXT: lui s1, %hi(var)
+; ILP32D-NEXT: addi s1, s1, %lo(var)
; ILP32D-NEXT: flw fa5, 16(s1)
; ILP32D-NEXT: fsw fa5, 60(sp) # 4-byte Folded Spill
; ILP32D-NEXT: flw fa5, 20(s1)
@@ -1729,7 +1744,8 @@ define void @caller() nounwind {
; LP64D-NEXT: fsw fa5, 76(sp) # 4-byte Folded Spill
; LP64D-NEXT: flw fa5, %lo(var+12)(s0)
; LP64D-NEXT: fsw fa5, 72(sp) # 4-byte Folded Spill
-; LP64D-NEXT: addi s1, s0, %lo(var)
+; LP64D-NEXT: lui s1, %hi(var)
+; LP64D-NEXT: addi s1, s1, %lo(var)
; LP64D-NEXT: flw fa5, 16(s1)
; LP64D-NEXT: fsw fa5, 68(sp) # 4-byte Folded Spill
; LP64D-NEXT: flw fa5, 20(s1)
diff --git a/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll b/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll
index 38e3c2d9256cd..397a51e5e97a1 100644
--- a/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll
+++ b/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll
@@ -24,7 +24,8 @@ define void @callee() nounwind {
; ILP32-NEXT: lui a0, %hi(var)
; ILP32-NEXT: fld fa5, %lo(var)(a0)
; ILP32-NEXT: fld fa4, %lo(var+8)(a0)
-; ILP32-NEXT: addi a1, a0, %lo(var)
+; ILP32-NEXT: lui a1, %hi(var)
+; ILP32-NEXT: addi a1, a1, %lo(var)
; ILP32-NEXT: fld fa3, 16(a1)
; ILP32-NEXT: fld fa2, 24(a1)
; ILP32-NEXT: fld fa1, 32(a1)
@@ -94,7 +95,8 @@ define void @callee() nounwind {
; LP64-NEXT: lui a0, %hi(var)
; LP64-NEXT: fld fa5, %lo(var)(a0)
; LP64-NEXT: fld fa4, %lo(var+8)(a0)
-; LP64-NEXT: addi a1, a0, %lo(var)
+; LP64-NEXT: lui a1, %hi(var)
+; LP64-NEXT: addi a1, a1, %lo(var)
; LP64-NEXT: fld fa3, 16(a1)
; LP64-NEXT: fld fa2, 24(a1)
; LP64-NEXT: fld fa1, 32(a1)
@@ -164,7 +166,8 @@ define void @callee() nounwind {
; LP64E-NEXT: lui a0, %hi(var)
; LP64E-NEXT: fld fa5, %lo(var)(a0)
; LP64E-NEXT: fld fa4, %lo(var+8)(a0)
-; LP64E-NEXT: addi a1, a0, %lo(var)
+; LP64E-NEXT: lui a1, %hi(var)
+; LP64E-NEXT: addi a1, a1, %lo(var)
; LP64E-NEXT: fld fa3, 16(a1)
; LP64E-NEXT: fld fa2, 24(a1)
; LP64E-NEXT: fld fa1, 32(a1)
@@ -247,7 +250,8 @@ define void @callee() nounwind {
; ILP32D-NEXT: lui a0, %hi(var)
; ILP32D-NEXT: fld fa5, %lo(var)(a0)
; ILP32D-NEXT: fld fa4, %lo(var+8)(a0)
-; ILP32D-NEXT: addi a1, a0, %lo(var)
+; ILP32D-NEXT: lui a1, %hi(var)
+; ILP32D-NEXT: addi a1, a1, %lo(var)
; ILP32D-NEXT: fld fa3, 16(a1)
; ILP32D-NEXT: fld fa2, 24(a1)
; ILP32D-NEXT: fld fa1, 32(a1)
@@ -343,7 +347,8 @@ define void @callee() nounwind {
; LP64D-NEXT: lui a0, %hi(var)
; LP64D-NEXT: fld fa5, %lo(var)(a0)
; LP64D-NEXT: fld fa4, %lo(var+8)(a0)
-; LP64D-NEXT: addi a1, a0, %lo(var)
+; LP64D-NEXT: lui a1, %hi(var)
+; LP64D-NEXT: addi a1, a1, %lo(var)
; LP64D-NEXT: fld fa3, 16(a1)
; LP64D-NEXT: fld fa2, 24(a1)
; LP64D-NEXT: fld fa1, 32(a1)
@@ -444,7 +449,8 @@ define void @caller() nounwind {
; ILP32-NEXT: fsd fa5, 248(sp) # 8-byte Folded Spill
; ILP32-NEXT: fld fa5, %lo(var+8)(s0)
; ILP32-NEXT: fsd fa5, 240(sp) # 8-byte Folded Spill
-; ILP32-NEXT: addi s1, s0, %lo(var)
+; ILP32-NEXT: lui s1, %hi(var)
+; ILP32-NEXT: addi s1, s1, %lo(var)
; ILP32-NEXT: fld fa5, 16(s1)
; ILP32-NEXT: fsd fa5, 232(sp) # 8-byte Folded Spill
; ILP32-NEXT: fld fa5, 24(s1)
@@ -587,7 +593,8 @@ define void @caller() nounwind {
; LP64-NEXT: fsd fa5, 256(sp) # 8-byte Folded Spill
; LP64-NEXT: fld fa5, %lo(var+8)(s0)
; LP64-NEXT: fsd fa5, 248(sp) # 8-byte Folded Spill
-; LP64-NEXT: addi s1, s0, %lo(var)
+; LP64-NEXT: lui s1, %hi(var)
+; LP64-NEXT: addi s1, s1, %lo(var)
; LP64-NEXT: fld fa5, 16(s1)
; LP64-NEXT: fsd fa5, 240(sp) # 8-byte Folded Spill
; LP64-NEXT: fld fa5, 24(s1)
@@ -730,7 +737,8 @@ define void @caller() nounwind {
; LP64E-NEXT: fsd fa5, 248(sp) # 8-byte Folded Spill
; LP64E-NEXT: fld fa5, %lo(var+8)(s0)
; LP64E-NEXT: fsd fa5, 240(sp) # 8-byte Folded Spill
-; LP64E-NEXT: addi s1, s0, %lo(var)
+; LP64E-NEXT: lui s1, %hi(var)
+; LP64E-NEXT: addi s1, s1, %lo(var)
; LP64E-NEXT: fld fa5, 16(s1)
; LP64E-NEXT: fsd fa5, 232(sp) # 8-byte Folded Spill
; LP64E-NEXT: fld fa5, 24(s1)
@@ -885,7 +893,8 @@ define void @caller() nounwind {
; ILP32D-NEXT: fsd fa5, 152(sp) # 8-byte Folded Spill
; ILP32D-NEXT: fld fa5, %lo(var+8)(s0)
; ILP32D-NEXT: fsd fa5, 144(sp) # 8-byte Folded Spill
-; ILP32D-NEXT: addi s1, s0, %lo(var)
+; ILP32D-NEXT: lui s1, %hi(var)
+; ILP32D-NEXT: addi s1, s1, %lo(var)
; ILP32D-NEXT: fld fa5, 16(s1)
; ILP32D-NEXT: fsd fa5, 136(sp) # 8-byte Folded Spill
; ILP32D-NEXT: fld fa5, 24(s1)
@@ -1028,7 +1037,8 @@ define void @caller() nounwind {
; LP64D-NEXT: fsd fa5, 160(sp) # 8-byte Folded Spill
; LP64D-NEXT: fld fa5, %lo(var+8)(s0)
; LP64D-NEXT: fsd fa5, 152(sp) # 8-byte Folded Spill
-; LP64D-NEXT: addi s1, s0, %lo(var)
+; LP64D-NEXT: lui s1, %hi(var)
+; LP64D-NEXT: addi s1, s1, %lo(var)
; LP64D-NEXT: fld fa5, 16(s1)
; LP64D-NEXT: fsd fa5, 144(sp) # 8-byte Folded Spill
; LP64D-NEXT: fld fa5, 24(s1)
diff --git a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
index 5e8ed4509b535..73509aa2a5a06 100644
--- a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
+++ b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
@@ -54,16 +54,17 @@ define void @callee() nounwind {
; RV32I-NEXT: sw s9, 36(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s10, 32(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s11, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lui a6, %hi(var)
-; RV32I-NEXT: lw a0, %lo(var)(a6)
+; RV32I-NEXT: lui a7, %hi(var)
+; RV32I-NEXT: lw a0, %lo(var)(a7)
; RV32I-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lw a0, %lo(var+4)(a6)
+; RV32I-NEXT: lw a0, %lo(var+4)(a7)
; RV32I-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lw a0, %lo(var+8)(a6)
+; RV32I-NEXT: lw a0, %lo(var+8)(a7)
; RV32I-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lw a0, %lo(var+12)(a6)
+; RV32I-NEXT: lw a0, %lo(var+12)(a7)
; RV32I-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: addi a5, a6, %lo(var)
+; RV32I-NEXT: lui a5, %hi(var)
+; RV32I-NEXT: addi a5, a5, %lo(var)
; RV32I-NEXT: lw a0, 16(a5)
; RV32I-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 20(a5)
@@ -88,7 +89,7 @@ define void @callee() nounwind {
; RV32I-NEXT: lw s10, 92(a5)
; RV32I-NEXT: lw s11, 96(a5)
; RV32I-NEXT: lw ra, 100(a5)
-; RV32I-NEXT: lw a7, 104(a5)
+; RV32I-NEXT: lw a6, 104(a5)
; RV32I-NEXT: lw a4, 108(a5)
; RV32I-NEXT: lw a0, 124(a5)
; RV32I-NEXT: lw a1, 120(a5)
@@ -99,7 +100,7 @@ define void @callee() nounwind {
; RV32I-NEXT: sw a2, 116(a5)
; RV32I-NEXT: sw a3, 112(a5)
; RV32I-NEXT: sw a4, 108(a5)
-; RV32I-NEXT: sw a7, 104(a5)
+; RV32I-NEXT: sw a6, 104(a5)
; RV32I-NEXT: sw ra, 100(a5)
; RV32I-NEXT: sw s11, 96(a5)
; RV32I-NEXT: sw s10, 92(a5)
@@ -125,13 +126,13 @@ define void @callee() nounwind {
; RV32I-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: sw a0, 16(a5)
; RV32I-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a0, %lo(var+12)(a6)
+; RV32I-NEXT: sw a0, %lo(var+12)(a7)
; RV32I-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a0, %lo(var+8)(a6)
+; RV32I-NEXT: sw a0, %lo(var+8)(a7)
; RV32I-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a0, %lo(var+4)(a6)
+; RV32I-NEXT: sw a0, %lo(var+4)(a7)
; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a0, %lo(var)(a6)
+; RV32I-NEXT: sw a0, %lo(var)(a7)
; RV32I-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 68(sp) # 4-byte Folded Reload
@@ -154,16 +155,17 @@ define void @callee() nounwind {
; RV32I-ILP32E-NEXT: sw ra, 32(sp) # 4-byte Folded Spill
; RV32I-ILP32E-NEXT: sw s0, 28(sp) # 4-byte Folded Spill
; RV32I-ILP32E-NEXT: sw s1, 24(sp) # 4-byte Folded Spill
-; RV32I-ILP32E-NEXT: lui a6, %hi(var)
-; RV32I-ILP32E-NEXT: lw a0, %lo(var)(a6)
+; RV32I-ILP32E-NEXT: lui a7, %hi(var)
+; RV32I-ILP32E-NEXT: lw a0, %lo(var)(a7)
; RV32I-ILP32E-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
-; RV32I-ILP32E-NEXT: lw a0, %lo(var+4)(a6)
+; RV32I-ILP32E-NEXT: lw a0, %lo(var+4)(a7)
; RV32I-ILP32E-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
-; RV32I-ILP32E-NEXT: lw a0, %lo(var+8)(a6)
+; RV32I-ILP32E-NEXT: lw a0, %lo(var+8)(a7)
; RV32I-ILP32E-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
-; RV32I-ILP32E-NEXT: lw a0, %lo(var+12)(a6)
+; RV32I-ILP32E-NEXT: lw a0, %lo(var+12)(a7)
; RV32I-ILP32E-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
-; RV32I-ILP32E-NEXT: addi a5, a6, %lo(var)
+; RV32I-ILP32E-NEXT: lui a5, %hi(var)
+; RV32I-ILP32E-NEXT: addi a5, a5, %lo(var)
; RV32I-ILP32E-NEXT: lw a0, 16(a5)
; RV32I-ILP32E-NEXT: sw a0, 4(sp) # 4-byte Folded Spill
; RV32I-ILP32E-NEXT: lw a0, 20(a5)
@@ -188,7 +190,7 @@ define void @callee() nounwind {
; RV32I-ILP32E-NEXT: lw s0, 92(a5)
; RV32I-ILP32E-NEXT: lw s1, 96(a5)
; RV32I-ILP32E-NEXT: lw ra, 100(a5)
-; RV32I-ILP32E-NEXT: lw a7, 104(a5)
+; RV32I-ILP32E-NEXT: lw a6, 104(a5)
; RV32I-ILP32E-NEXT: lw a4, 108(a5)
; RV32I-ILP32E-NEXT: lw a0, 124(a5)
; RV32I-ILP32E-NEXT: lw a1, 120(a5)
@@ -199,7 +201,7 @@ define void @callee() nounwind {
; RV32I-ILP32E-NEXT: sw a2, 116(a5)
; RV32I-ILP32E-NEXT: sw a3, 112(a5)
; RV32I-ILP32E-NEXT: sw a4, 108(a5)
-; RV32I-ILP32E-NEXT: sw a7, 104(a5)
+; RV32I-ILP32E-NEXT: sw a6, 104(a5)
; RV32I-ILP32E-NEXT: sw ra, 100(a5)
; RV32I-ILP32E-NEXT: sw s1, 96(a5)
; RV32I-ILP32E-NEXT: sw s0, 92(a5)
@@ -225,13 +227,13 @@ define void @callee() nounwind {
; RV32I-ILP32E-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
; RV32I-ILP32E-NEXT: sw a0, 16(a5)
; RV32I-ILP32E-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32I-ILP32E-NEXT: sw a0, %lo(var+12)(a6)
+; RV32I-ILP32E-NEXT: sw a0, %lo(var+12)(a7)
; RV32I-ILP32E-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32I-ILP32E-NEXT: sw a0, %lo(var+8)(a6)
+; RV32I-ILP32E-NEXT: sw a0, %lo(var+8)(a7)
; RV32I-ILP32E-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32I-ILP32E-NEXT: sw a0, %lo(var+4)(a6)
+; RV32I-ILP32E-NEXT: sw a0, %lo(var+4)(a7)
; RV32I-ILP32E-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32I-ILP32E-NEXT: sw a0, %lo(var)(a6)
+; RV32I-ILP32E-NEXT: sw a0, %lo(var)(a7)
; RV32I-ILP32E-NEXT: lw ra, 32(sp) # 4-byte Folded Reload
; RV32I-ILP32E-NEXT: lw s0, 28(sp) # 4-byte Folded Reload
; RV32I-ILP32E-NEXT: lw s1, 24(sp) # 4-byte Folded Reload
@@ -255,16 +257,17 @@ define void @callee() nounwind {
; RV32I-WITH-FP-NEXT: sw s10, 32(sp) # 4-byte Folded Spill
; RV32I-WITH-FP-NEXT: sw s11, 28(sp) # 4-byte Folded Spill
; RV32I-WITH-FP-NEXT: addi s0, sp, 80
-; RV32I-WITH-FP-NEXT: lui a6, %hi(var)
-; RV32I-WITH-FP-NEXT: lw a0, %lo(var)(a6)
+; RV32I-WITH-FP-NEXT: lui t0, %hi(var)
+; RV32I-WITH-FP-NEXT: lw a0, %lo(var)(t0)
; RV32I-WITH-FP-NEXT: sw a0, -56(s0) # 4-byte Folded Spill
-; RV32I-WITH-FP-NEXT: lw a0, %lo(var+4)(a6)
+; RV32I-WITH-FP-NEXT: lw a0, %lo(var+4)(t0)
; RV32I-WITH-FP-NEXT: sw a0, -60(s0) # 4-byte Folded Spill
-; RV32I-WITH-FP-NEXT: lw a0, %lo(var+8)(a6)
+; RV32I-WITH-FP-NEXT: lw a0, %lo(var+8)(t0)
; RV32I-WITH-FP-NEXT: sw a0, -64(s0) # 4-byte Folded Spill
-; RV32I-WITH-FP-NEXT: lw a0, %lo(var+12)(a6)
+; RV32I-WITH-FP-NEXT: lw a0, %lo(var+12)(t0)
; RV32I-WITH-FP-NEXT: sw a0, -68(s0) # 4-byte Folded Spill
-; RV32I-WITH-FP-NEXT: addi a5, a6, %lo(var)
+; RV32I-WITH-FP-NEXT: lui a5, %hi(var)
+; RV32I-WITH-FP-NEXT: addi a5, a5, %lo(var)
; RV32I-WITH-FP-NEXT: lw a0, 16(a5)
; RV32I-WITH-FP-NEXT: sw a0, -72(s0) # 4-byte Folded Spill
; RV32I-WITH-FP-NEXT: lw a0, 20(a5)
@@ -289,8 +292,8 @@ define void @callee() nounwind {
; RV32I-WITH-FP-NEXT: lw s10, 88(a5)
; RV32I-WITH-FP-NEXT: lw s11, 92(a5)
; RV32I-WITH-FP-NEXT: lw ra, 96(a5)
-; RV32I-WITH-FP-NEXT: lw t0, 100(a5)
-; RV32I-WITH-FP-NEXT: lw a7, 104(a5)
+; RV32I-WITH-FP-NEXT: lw a7, 100(a5)
+; RV32I-WITH-FP-NEXT: lw a6, 104(a5)
; RV32I-WITH-FP-NEXT: lw a4, 108(a5)
; RV32I-WITH-FP-NEXT: lw a0, 124(a5)
; RV32I-WITH-FP-NEXT: lw a1, 120(a5)
@@ -301,8 +304,8 @@ define void @callee() nounwind {
; RV32I-WITH-FP-NEXT: sw a2, 116(a5)
; RV32I-WITH-FP-NEXT: sw a3, 112(a5)
; RV32I-WITH-FP-NEXT: sw a4, 108(a5)
-; RV32I-WITH-FP-NEXT: sw a7, 104(a5)
-; RV32I-WITH-FP-NEXT: sw t0, 100(a5)
+; RV32I-WITH-FP-NEXT: sw a6, 104(a5)
+; RV32I-WITH-FP-NEXT: sw a7, 100(a5)
; RV32I-WITH-FP-NEXT: sw ra, 96(a5)
; RV32I-WITH-FP-NEXT: sw s11, 92(a5)
; RV32I-WITH-FP-NEXT: sw s10, 88(a5)
@@ -328,13 +331,13 @@ define void @callee() nounwind {
; RV32I-WITH-FP-NEXT: lw a0, -72(s0) # 4-byte Folded Reload
; RV32I-WITH-FP-NEXT: sw a0, 16(a5)
; RV32I-WITH-FP-NEXT: lw a0, -68(s0) # 4-byte Folded Reload
-; RV32I-WITH-FP-NEXT: sw a0, %lo(var+12)(a6)
+; RV32I-WITH-FP-NEXT: sw a0, %lo(var+12)(t0)
; RV32I-WITH-FP-NEXT: lw a0, -64(s0) # 4-byte Folded Reload
-; RV32I-WITH-FP-NEXT: sw a0, %lo(var+8)(a6)
+; RV32I-WITH-FP-NEXT: sw a0, %lo(var+8)(t0)
; RV32I-WITH-FP-NEXT: lw a0, -60(s0) # 4-byte Folded Reload
-; RV32I-WITH-FP-NEXT: sw a0, %lo(var+4)(a6)
+; RV32I-WITH-FP-NEXT: sw a0, %lo(var+4)(t0)
; RV32I-WITH-FP-NEXT: lw a0, -56(s0) # 4-byte Folded Reload
-; RV32I-WITH-FP-NEXT: sw a0, %lo(var)(a6)
+; RV32I-WITH-FP-NEXT: sw a0, %lo(var)(t0)
; RV32I-WITH-FP-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32I-WITH-FP-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; RV32I-WITH-FP-NEXT: lw s1, 68(sp) # 4-byte Folded Reload
@@ -354,16 +357,17 @@ define void @callee() nounwind {
; RV32IZCMP-LABEL: callee:
; RV32IZCMP: # %bb.0:
; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -96
-; RV32IZCMP-NEXT: lui a6, %hi(var)
-; RV32IZCMP-NEXT: lw a0, %lo(var)(a6)
+; RV32IZCMP-NEXT: lui a7, %hi(var)
+; RV32IZCMP-NEXT: lw a0, %lo(var)(a7)
; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: lw a0, %lo(var+4)(a6)
+; RV32IZCMP-NEXT: lw a0, %lo(var+4)(a7)
; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: lw a0, %lo(var+8)(a6)
+; RV32IZCMP-NEXT: lw a0, %lo(var+8)(a7)
; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: lw a0, %lo(var+12)(a6)
+; RV32IZCMP-NEXT: lw a0, %lo(var+12)(a7)
; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: addi a5, a6, %lo(var)
+; RV32IZCMP-NEXT: lui a5, %hi(var)
+; RV32IZCMP-NEXT: addi a5, a5, %lo(var)
; RV32IZCMP-NEXT: lw a0, 16(a5)
; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 20(a5)
@@ -388,7 +392,7 @@ define void @callee() nounwind {
; RV32IZCMP-NEXT: lw t1, 92(a5)
; RV32IZCMP-NEXT: lw t0, 96(a5)
; RV32IZCMP-NEXT: lw s0, 100(a5)
-; RV32IZCMP-NEXT: lw a7, 104(a5)
+; RV32IZCMP-NEXT: lw a6, 104(a5)
; RV32IZCMP-NEXT: lw a4, 108(a5)
; RV32IZCMP-NEXT: lw a0, 124(a5)
; RV32IZCMP-NEXT: lw a1, 120(a5)
@@ -399,7 +403,7 @@ define void @callee() nounwind {
; RV32IZCMP-NEXT: sw a2, 116(a5)
; RV32IZCMP-NEXT: sw a3, 112(a5)
; RV32IZCMP-NEXT: sw a4, 108(a5)
-; RV32IZCMP-NEXT: sw a7, 104(a5)
+; RV32IZCMP-NEXT: sw a6, 104(a5)
; RV32IZCMP-NEXT: sw s0, 100(a5)
; RV32IZCMP-NEXT: sw t0, 96(a5)
; RV32IZCMP-NEXT: sw t1, 92(a5)
@@ -425,13 +429,13 @@ define void @callee() nounwind {
; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: sw a0, 16(a5)
; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var+12)(a6)
+; RV32IZCMP-NEXT: sw a0, %lo(var+12)(a7)
; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var+8)(a6)
+; RV32IZCMP-NEXT: sw a0, %lo(var+8)(a7)
; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var+4)(a6)
+; RV32IZCMP-NEXT: sw a0, %lo(var+4)(a7)
; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var)(a6)
+; RV32IZCMP-NEXT: sw a0, %lo(var)(a7)
; RV32IZCMP-NEXT: cm.popret {ra, s0-s11}, 96
;
; RV32IZCMP-WITH-FP-LABEL: callee:
@@ -451,16 +455,17 @@ define void @callee() nounwind {
; RV32IZCMP-WITH-FP-NEXT: sw s10, 32(sp) # 4-byte Folded Spill
; RV32IZCMP-WITH-FP-NEXT: sw s11, 28(sp) # 4-byte Folded Spill
; RV32IZCMP-WITH-FP-NEXT: addi s0, sp, 80
-; RV32IZCMP-WITH-FP-NEXT: lui a6, %hi(var)
-; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var)(a6)
+; RV32IZCMP-WITH-FP-NEXT: lui t0, %hi(var)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var)(t0)
; RV32IZCMP-WITH-FP-NEXT: sw a0, -56(s0) # 4-byte Folded Spill
-; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var+4)(a6)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var+4)(t0)
; RV32IZCMP-WITH-FP-NEXT: sw a0, -60(s0) # 4-byte Folded Spill
-; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var+8)(a6)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var+8)(t0)
; RV32IZCMP-WITH-FP-NEXT: sw a0, -64(s0) # 4-byte Folded Spill
-; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var+12)(a6)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var+12)(t0)
; RV32IZCMP-WITH-FP-NEXT: sw a0, -68(s0) # 4-byte Folded Spill
-; RV32IZCMP-WITH-FP-NEXT: addi a5, a6, %lo(var)
+; RV32IZCMP-WITH-FP-NEXT: lui a5, %hi(var)
+; RV32IZCMP-WITH-FP-NEXT: addi a5, a5, %lo(var)
; RV32IZCMP-WITH-FP-NEXT: lw a0, 16(a5)
; RV32IZCMP-WITH-FP-NEXT: sw a0, -72(s0) # 4-byte Folded Spill
; RV32IZCMP-WITH-FP-NEXT: lw a0, 20(a5)
@@ -485,8 +490,8 @@ define void @callee() nounwind {
; RV32IZCMP-WITH-FP-NEXT: lw t2, 88(a5)
; RV32IZCMP-WITH-FP-NEXT: lw s1, 92(a5)
; RV32IZCMP-WITH-FP-NEXT: lw t1, 96(a5)
-; RV32IZCMP-WITH-FP-NEXT: lw t0, 100(a5)
-; RV32IZCMP-WITH-FP-NEXT: lw a7, 104(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw a7, 100(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw a6, 104(a5)
; RV32IZCMP-WITH-FP-NEXT: lw a4, 108(a5)
; RV32IZCMP-WITH-FP-NEXT: lw a0, 124(a5)
; RV32IZCMP-WITH-FP-NEXT: lw a1, 120(a5)
@@ -497,8 +502,8 @@ define void @callee() nounwind {
; RV32IZCMP-WITH-FP-NEXT: sw a2, 116(a5)
; RV32IZCMP-WITH-FP-NEXT: sw a3, 112(a5)
; RV32IZCMP-WITH-FP-NEXT: sw a4, 108(a5)
-; RV32IZCMP-WITH-FP-NEXT: sw a7, 104(a5)
-; RV32IZCMP-WITH-FP-NEXT: sw t0, 100(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw a6, 104(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw a7, 100(a5)
; RV32IZCMP-WITH-FP-NEXT: sw t1, 96(a5)
; RV32IZCMP-WITH-FP-NEXT: sw s1, 92(a5)
; RV32IZCMP-WITH-FP-NEXT: sw t2, 88(a5)
@@ -524,13 +529,13 @@ define void @callee() nounwind {
; RV32IZCMP-WITH-FP-NEXT: lw a0, -72(s0) # 4-byte Folded Reload
; RV32IZCMP-WITH-FP-NEXT: sw a0, 16(a5)
; RV32IZCMP-WITH-FP-NEXT: lw a0, -68(s0) # 4-byte Folded Reload
-; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var+12)(a6)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var+12)(t0)
; RV32IZCMP-WITH-FP-NEXT: lw a0, -64(s0) # 4-byte Folded Reload
-; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var+8)(a6)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var+8)(t0)
; RV32IZCMP-WITH-FP-NEXT: lw a0, -60(s0) # 4-byte Folded Reload
-; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var+4)(a6)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var+4)(t0)
; RV32IZCMP-WITH-FP-NEXT: lw a0, -56(s0) # 4-byte Folded Reload
-; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var)(a6)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var)(t0)
; RV32IZCMP-WITH-FP-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32IZCMP-WITH-FP-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; RV32IZCMP-WITH-FP-NEXT: lw s1, 68(sp) # 4-byte Folded Reload
@@ -563,16 +568,17 @@ define void @callee() nounwind {
; RV64I-NEXT: sd s9, 72(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s10, 64(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s11, 56(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lui a6, %hi(var)
-; RV64I-NEXT: lw a0, %lo(var)(a6)
+; RV64I-NEXT: lui a7, %hi(var)
+; RV64I-NEXT: lw a0, %lo(var)(a7)
; RV64I-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lw a0, %lo(var+4)(a6)
+; RV64I-NEXT: lw a0, %lo(var+4)(a7)
; RV64I-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lw a0, %lo(var+8)(a6)
+; RV64I-NEXT: lw a0, %lo(var+8)(a7)
; RV64I-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lw a0, %lo(var+12)(a6)
+; RV64I-NEXT: lw a0, %lo(var+12)(a7)
; RV64I-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: addi a5, a6, %lo(var)
+; RV64I-NEXT: lui a5, %hi(var)
+; RV64I-NEXT: addi a5, a5, %lo(var)
; RV64I-NEXT: lw a0, 16(a5)
; RV64I-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 20(a5)
@@ -597,7 +603,7 @@ define void @callee() nounwind {
; RV64I-NEXT: lw s10, 92(a5)
; RV64I-NEXT: lw s11, 96(a5)
; RV64I-NEXT: lw ra, 100(a5)
-; RV64I-NEXT: lw a7, 104(a5)
+; RV64I-NEXT: lw a6, 104(a5)
; RV64I-NEXT: lw a4, 108(a5)
; RV64I-NEXT: lw a0, 124(a5)
; RV64I-NEXT: lw a1, 120(a5)
@@ -608,7 +614,7 @@ define void @callee() nounwind {
; RV64I-NEXT: sw a2, 116(a5)
; RV64I-NEXT: sw a3, 112(a5)
; RV64I-NEXT: sw a4, 108(a5)
-; RV64I-NEXT: sw a7, 104(a5)
+; RV64I-NEXT: sw a6, 104(a5)
; RV64I-NEXT: sw ra, 100(a5)
; RV64I-NEXT: sw s11, 96(a5)
; RV64I-NEXT: sw s10, 92(a5)
@@ -634,13 +640,13 @@ define void @callee() nounwind {
; RV64I-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: sw a0, 16(a5)
; RV64I-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sw a0, %lo(var+12)(a6)
+; RV64I-NEXT: sw a0, %lo(var+12)(a7)
; RV64I-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sw a0, %lo(var+8)(a6)
+; RV64I-NEXT: sw a0, %lo(var+8)(a7)
; RV64I-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sw a0, %lo(var+4)(a6)
+; RV64I-NEXT: sw a0, %lo(var+4)(a7)
; RV64I-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sw a0, %lo(var)(a6)
+; RV64I-NEXT: sw a0, %lo(var)(a7)
; RV64I-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 144(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 136(sp) # 8-byte Folded Reload
@@ -663,16 +669,17 @@ define void @callee() nounwind {
; RV64I-LP64E-NEXT: sd ra, 64(sp) # 8-byte Folded Spill
; RV64I-LP64E-NEXT: sd s0, 56(sp) # 8-byte Folded Spill
; RV64I-LP64E-NEXT: sd s1, 48(sp) # 8-byte Folded Spill
-; RV64I-LP64E-NEXT: lui a6, %hi(var)
-; RV64I-LP64E-NEXT: lw a0, %lo(var)(a6)
+; RV64I-LP64E-NEXT: lui a7, %hi(var)
+; RV64I-LP64E-NEXT: lw a0, %lo(var)(a7)
; RV64I-LP64E-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64I-LP64E-NEXT: lw a0, %lo(var+4)(a6)
+; RV64I-LP64E-NEXT: lw a0, %lo(var+4)(a7)
; RV64I-LP64E-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64I-LP64E-NEXT: lw a0, %lo(var+8)(a6)
+; RV64I-LP64E-NEXT: lw a0, %lo(var+8)(a7)
; RV64I-LP64E-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
-; RV64I-LP64E-NEXT: lw a0, %lo(var+12)(a6)
+; RV64I-LP64E-NEXT: lw a0, %lo(var+12)(a7)
; RV64I-LP64E-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
-; RV64I-LP64E-NEXT: addi a5, a6, %lo(var)
+; RV64I-LP64E-NEXT: lui a5, %hi(var)
+; RV64I-LP64E-NEXT: addi a5, a5, %lo(var)
; RV64I-LP64E-NEXT: lw a0, 16(a5)
; RV64I-LP64E-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
; RV64I-LP64E-NEXT: lw a0, 20(a5)
@@ -697,7 +704,7 @@ define void @callee() nounwind {
; RV64I-LP64E-NEXT: lw s0, 92(a5)
; RV64I-LP64E-NEXT: lw s1, 96(a5)
; RV64I-LP64E-NEXT: lw ra, 100(a5)
-; RV64I-LP64E-NEXT: lw a7, 104(a5)
+; RV64I-LP64E-NEXT: lw a6, 104(a5)
; RV64I-LP64E-NEXT: lw a4, 108(a5)
; RV64I-LP64E-NEXT: lw a0, 124(a5)
; RV64I-LP64E-NEXT: lw a1, 120(a5)
@@ -708,7 +715,7 @@ define void @callee() nounwind {
; RV64I-LP64E-NEXT: sw a2, 116(a5)
; RV64I-LP64E-NEXT: sw a3, 112(a5)
; RV64I-LP64E-NEXT: sw a4, 108(a5)
-; RV64I-LP64E-NEXT: sw a7, 104(a5)
+; RV64I-LP64E-NEXT: sw a6, 104(a5)
; RV64I-LP64E-NEXT: sw ra, 100(a5)
; RV64I-LP64E-NEXT: sw s1, 96(a5)
; RV64I-LP64E-NEXT: sw s0, 92(a5)
@@ -734,13 +741,13 @@ define void @callee() nounwind {
; RV64I-LP64E-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
; RV64I-LP64E-NEXT: sw a0, 16(a5)
; RV64I-LP64E-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
-; RV64I-LP64E-NEXT: sw a0, %lo(var+12)(a6)
+; RV64I-LP64E-NEXT: sw a0, %lo(var+12)(a7)
; RV64I-LP64E-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64I-LP64E-NEXT: sw a0, %lo(var+8)(a6)
+; RV64I-LP64E-NEXT: sw a0, %lo(var+8)(a7)
; RV64I-LP64E-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64I-LP64E-NEXT: sw a0, %lo(var+4)(a6)
+; RV64I-LP64E-NEXT: sw a0, %lo(var+4)(a7)
; RV64I-LP64E-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
-; RV64I-LP64E-NEXT: sw a0, %lo(var)(a6)
+; RV64I-LP64E-NEXT: sw a0, %lo(var)(a7)
; RV64I-LP64E-NEXT: ld ra, 64(sp) # 8-byte Folded Reload
; RV64I-LP64E-NEXT: ld s0, 56(sp) # 8-byte Folded Reload
; RV64I-LP64E-NEXT: ld s1, 48(sp) # 8-byte Folded Reload
@@ -764,16 +771,17 @@ define void @callee() nounwind {
; RV64I-WITH-FP-NEXT: sd s10, 64(sp) # 8-byte Folded Spill
; RV64I-WITH-FP-NEXT: sd s11, 56(sp) # 8-byte Folded Spill
; RV64I-WITH-FP-NEXT: addi s0, sp, 160
-; RV64I-WITH-FP-NEXT: lui a6, %hi(var)
-; RV64I-WITH-FP-NEXT: lw a0, %lo(var)(a6)
+; RV64I-WITH-FP-NEXT: lui t0, %hi(var)
+; RV64I-WITH-FP-NEXT: lw a0, %lo(var)(t0)
; RV64I-WITH-FP-NEXT: sd a0, -112(s0) # 8-byte Folded Spill
-; RV64I-WITH-FP-NEXT: lw a0, %lo(var+4)(a6)
+; RV64I-WITH-FP-NEXT: lw a0, %lo(var+4)(t0)
; RV64I-WITH-FP-NEXT: sd a0, -120(s0) # 8-byte Folded Spill
-; RV64I-WITH-FP-NEXT: lw a0, %lo(var+8)(a6)
+; RV64I-WITH-FP-NEXT: lw a0, %lo(var+8)(t0)
; RV64I-WITH-FP-NEXT: sd a0, -128(s0) # 8-byte Folded Spill
-; RV64I-WITH-FP-NEXT: lw a0, %lo(var+12)(a6)
+; RV64I-WITH-FP-NEXT: lw a0, %lo(var+12)(t0)
; RV64I-WITH-FP-NEXT: sd a0, -136(s0) # 8-byte Folded Spill
-; RV64I-WITH-FP-NEXT: addi a5, a6, %lo(var)
+; RV64I-WITH-FP-NEXT: lui a5, %hi(var)
+; RV64I-WITH-FP-NEXT: addi a5, a5, %lo(var)
; RV64I-WITH-FP-NEXT: lw a0, 16(a5)
; RV64I-WITH-FP-NEXT: sd a0, -144(s0) # 8-byte Folded Spill
; RV64I-WITH-FP-NEXT: lw a0, 20(a5)
@@ -798,8 +806,8 @@ define void @callee() nounwind {
; RV64I-WITH-FP-NEXT: lw s10, 88(a5)
; RV64I-WITH-FP-NEXT: lw s11, 92(a5)
; RV64I-WITH-FP-NEXT: lw ra, 96(a5)
-; RV64I-WITH-FP-NEXT: lw t0, 100(a5)
-; RV64I-WITH-FP-NEXT: lw a7, 104(a5)
+; RV64I-WITH-FP-NEXT: lw a7, 100(a5)
+; RV64I-WITH-FP-NEXT: lw a6, 104(a5)
; RV64I-WITH-FP-NEXT: lw a4, 108(a5)
; RV64I-WITH-FP-NEXT: lw a0, 124(a5)
; RV64I-WITH-FP-NEXT: lw a1, 120(a5)
@@ -810,8 +818,8 @@ define void @callee() nounwind {
; RV64I-WITH-FP-NEXT: sw a2, 116(a5)
; RV64I-WITH-FP-NEXT: sw a3, 112(a5)
; RV64I-WITH-FP-NEXT: sw a4, 108(a5)
-; RV64I-WITH-FP-NEXT: sw a7, 104(a5)
-; RV64I-WITH-FP-NEXT: sw t0, 100(a5)
+; RV64I-WITH-FP-NEXT: sw a6, 104(a5)
+; RV64I-WITH-FP-NEXT: sw a7, 100(a5)
; RV64I-WITH-FP-NEXT: sw ra, 96(a5)
; RV64I-WITH-FP-NEXT: sw s11, 92(a5)
; RV64I-WITH-FP-NEXT: sw s10, 88(a5)
@@ -837,13 +845,13 @@ define void @callee() nounwind {
; RV64I-WITH-FP-NEXT: ld a0, -144(s0) # 8-byte Folded Reload
; RV64I-WITH-FP-NEXT: sw a0, 16(a5)
; RV64I-WITH-FP-NEXT: ld a0, -136(s0) # 8-byte Folded Reload
-; RV64I-WITH-FP-NEXT: sw a0, %lo(var+12)(a6)
+; RV64I-WITH-FP-NEXT: sw a0, %lo(var+12)(t0)
; RV64I-WITH-FP-NEXT: ld a0, -128(s0) # 8-byte Folded Reload
-; RV64I-WITH-FP-NEXT: sw a0, %lo(var+8)(a6)
+; RV64I-WITH-FP-NEXT: sw a0, %lo(var+8)(t0)
; RV64I-WITH-FP-NEXT: ld a0, -120(s0) # 8-byte Folded Reload
-; RV64I-WITH-FP-NEXT: sw a0, %lo(var+4)(a6)
+; RV64I-WITH-FP-NEXT: sw a0, %lo(var+4)(t0)
; RV64I-WITH-FP-NEXT: ld a0, -112(s0) # 8-byte Folded Reload
-; RV64I-WITH-FP-NEXT: sw a0, %lo(var)(a6)
+; RV64I-WITH-FP-NEXT: sw a0, %lo(var)(t0)
; RV64I-WITH-FP-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
; RV64I-WITH-FP-NEXT: ld s0, 144(sp) # 8-byte Folded Reload
; RV64I-WITH-FP-NEXT: ld s1, 136(sp) # 8-byte Folded Reload
@@ -863,16 +871,17 @@ define void @callee() nounwind {
; RV64IZCMP-LABEL: callee:
; RV64IZCMP: # %bb.0:
; RV64IZCMP-NEXT: cm.push {ra, s0-s11}, -160
-; RV64IZCMP-NEXT: lui a6, %hi(var)
-; RV64IZCMP-NEXT: lw a0, %lo(var)(a6)
+; RV64IZCMP-NEXT: lui a7, %hi(var)
+; RV64IZCMP-NEXT: lw a0, %lo(var)(a7)
; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, %lo(var+4)(a6)
+; RV64IZCMP-NEXT: lw a0, %lo(var+4)(a7)
; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, %lo(var+8)(a6)
+; RV64IZCMP-NEXT: lw a0, %lo(var+8)(a7)
; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, %lo(var+12)(a6)
+; RV64IZCMP-NEXT: lw a0, %lo(var+12)(a7)
; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: addi a5, a6, %lo(var)
+; RV64IZCMP-NEXT: lui a5, %hi(var)
+; RV64IZCMP-NEXT: addi a5, a5, %lo(var)
; RV64IZCMP-NEXT: lw a0, 16(a5)
; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: lw a0, 20(a5)
@@ -897,7 +906,7 @@ define void @callee() nounwind {
; RV64IZCMP-NEXT: lw t1, 92(a5)
; RV64IZCMP-NEXT: lw t0, 96(a5)
; RV64IZCMP-NEXT: lw s0, 100(a5)
-; RV64IZCMP-NEXT: lw a7, 104(a5)
+; RV64IZCMP-NEXT: lw a6, 104(a5)
; RV64IZCMP-NEXT: lw a4, 108(a5)
; RV64IZCMP-NEXT: lw a0, 124(a5)
; RV64IZCMP-NEXT: lw a1, 120(a5)
@@ -908,7 +917,7 @@ define void @callee() nounwind {
; RV64IZCMP-NEXT: sw a2, 116(a5)
; RV64IZCMP-NEXT: sw a3, 112(a5)
; RV64IZCMP-NEXT: sw a4, 108(a5)
-; RV64IZCMP-NEXT: sw a7, 104(a5)
+; RV64IZCMP-NEXT: sw a6, 104(a5)
; RV64IZCMP-NEXT: sw s0, 100(a5)
; RV64IZCMP-NEXT: sw t0, 96(a5)
; RV64IZCMP-NEXT: sw t1, 92(a5)
@@ -934,13 +943,13 @@ define void @callee() nounwind {
; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 16(a5)
; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: sw a0, %lo(var+12)(a6)
+; RV64IZCMP-NEXT: sw a0, %lo(var+12)(a7)
; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: sw a0, %lo(var+8)(a6)
+; RV64IZCMP-NEXT: sw a0, %lo(var+8)(a7)
; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: sw a0, %lo(var+4)(a6)
+; RV64IZCMP-NEXT: sw a0, %lo(var+4)(a7)
; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: sw a0, %lo(var)(a6)
+; RV64IZCMP-NEXT: sw a0, %lo(var)(a7)
; RV64IZCMP-NEXT: cm.popret {ra, s0-s11}, 160
;
; RV64IZCMP-WITH-FP-LABEL: callee:
@@ -960,16 +969,17 @@ define void @callee() nounwind {
; RV64IZCMP-WITH-FP-NEXT: sd s10, 64(sp) # 8-byte Folded Spill
; RV64IZCMP-WITH-FP-NEXT: sd s11, 56(sp) # 8-byte Folded Spill
; RV64IZCMP-WITH-FP-NEXT: addi s0, sp, 160
-; RV64IZCMP-WITH-FP-NEXT: lui a6, %hi(var)
-; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var)(a6)
+; RV64IZCMP-WITH-FP-NEXT: lui t0, %hi(var)
+; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var)(t0)
; RV64IZCMP-WITH-FP-NEXT: sd a0, -112(s0) # 8-byte Folded Spill
-; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var+4)(a6)
+; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var+4)(t0)
; RV64IZCMP-WITH-FP-NEXT: sd a0, -120(s0) # 8-byte Folded Spill
-; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var+8)(a6)
+; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var+8)(t0)
; RV64IZCMP-WITH-FP-NEXT: sd a0, -128(s0) # 8-byte Folded Spill
-; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var+12)(a6)
+; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var+12)(t0)
; RV64IZCMP-WITH-FP-NEXT: sd a0, -136(s0) # 8-byte Folded Spill
-; RV64IZCMP-WITH-FP-NEXT: addi a5, a6, %lo(var)
+; RV64IZCMP-WITH-FP-NEXT: lui a5, %hi(var)
+; RV64IZCMP-WITH-FP-NEXT: addi a5, a5, %lo(var)
; RV64IZCMP-WITH-FP-NEXT: lw a0, 16(a5)
; RV64IZCMP-WITH-FP-NEXT: sd a0, -144(s0) # 8-byte Folded Spill
; RV64IZCMP-WITH-FP-NEXT: lw a0, 20(a5)
@@ -994,8 +1004,8 @@ define void @callee() nounwind {
; RV64IZCMP-WITH-FP-NEXT: lw t2, 88(a5)
; RV64IZCMP-WITH-FP-NEXT: lw s1, 92(a5)
; RV64IZCMP-WITH-FP-NEXT: lw t1, 96(a5)
-; RV64IZCMP-WITH-FP-NEXT: lw t0, 100(a5)
-; RV64IZCMP-WITH-FP-NEXT: lw a7, 104(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw a7, 100(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw a6, 104(a5)
; RV64IZCMP-WITH-FP-NEXT: lw a4, 108(a5)
; RV64IZCMP-WITH-FP-NEXT: lw a0, 124(a5)
; RV64IZCMP-WITH-FP-NEXT: lw a1, 120(a5)
@@ -1006,8 +1016,8 @@ define void @callee() nounwind {
; RV64IZCMP-WITH-FP-NEXT: sw a2, 116(a5)
; RV64IZCMP-WITH-FP-NEXT: sw a3, 112(a5)
; RV64IZCMP-WITH-FP-NEXT: sw a4, 108(a5)
-; RV64IZCMP-WITH-FP-NEXT: sw a7, 104(a5)
-; RV64IZCMP-WITH-FP-NEXT: sw t0, 100(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw a6, 104(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw a7, 100(a5)
; RV64IZCMP-WITH-FP-NEXT: sw t1, 96(a5)
; RV64IZCMP-WITH-FP-NEXT: sw s1, 92(a5)
; RV64IZCMP-WITH-FP-NEXT: sw t2, 88(a5)
@@ -1033,13 +1043,13 @@ define void @callee() nounwind {
; RV64IZCMP-WITH-FP-NEXT: ld a0, -144(s0) # 8-byte Folded Reload
; RV64IZCMP-WITH-FP-NEXT: sw a0, 16(a5)
; RV64IZCMP-WITH-FP-NEXT: ld a0, -136(s0) # 8-byte Folded Reload
-; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var+12)(a6)
+; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var+12)(t0)
; RV64IZCMP-WITH-FP-NEXT: ld a0, -128(s0) # 8-byte Folded Reload
-; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var+8)(a6)
+; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var+8)(t0)
; RV64IZCMP-WITH-FP-NEXT: ld a0, -120(s0) # 8-byte Folded Reload
-; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var+4)(a6)
+; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var+4)(t0)
; RV64IZCMP-WITH-FP-NEXT: ld a0, -112(s0) # 8-byte Folded Reload
-; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var)(a6)
+; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var)(t0)
; RV64IZCMP-WITH-FP-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
; RV64IZCMP-WITH-FP-NEXT: ld s0, 144(sp) # 8-byte Folded Reload
; RV64IZCMP-WITH-FP-NEXT: ld s1, 136(sp) # 8-byte Folded Reload
@@ -1089,7 +1099,8 @@ define void @caller() nounwind {
; RV32I-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, %lo(var+12)(s0)
; RV32I-NEXT: sw a0, 76(sp) # 4-byte Folded Spill
-; RV32I-NEXT: addi s5, s0, %lo(var)
+; RV32I-NEXT: lui s5, %hi(var)
+; RV32I-NEXT: addi s5, s5, %lo(var)
; RV32I-NEXT: lw a0, 16(s5)
; RV32I-NEXT: sw a0, 72(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 20(s5)
@@ -1220,9 +1231,10 @@ define void @caller() nounwind {
; RV32I-ILP32E-NEXT: sw a1, 116(sp) # 4-byte Folded Spill
; RV32I-ILP32E-NEXT: lw a1, %lo(var+8)(a0)
; RV32I-ILP32E-NEXT: sw a1, 112(sp) # 4-byte Folded Spill
-; RV32I-ILP32E-NEXT: lw a1, %lo(var+12)(a0)
-; RV32I-ILP32E-NEXT: sw a1, 108(sp) # 4-byte Folded Spill
-; RV32I-ILP32E-NEXT: addi s1, a0, %lo(var)
+; RV32I-ILP32E-NEXT: lw a0, %lo(var+12)(a0)
+; RV32I-ILP32E-NEXT: sw a0, 108(sp) # 4-byte Folded Spill
+; RV32I-ILP32E-NEXT: lui s1, %hi(var)
+; RV32I-ILP32E-NEXT: addi s1, s1, %lo(var)
; RV32I-ILP32E-NEXT: lw a0, 16(s1)
; RV32I-ILP32E-NEXT: sw a0, 104(sp) # 4-byte Folded Spill
; RV32I-ILP32E-NEXT: lw a0, 20(s1)
@@ -1375,7 +1387,8 @@ define void @caller() nounwind {
; RV32I-WITH-FP-NEXT: sw a0, -64(s0) # 4-byte Folded Spill
; RV32I-WITH-FP-NEXT: lw a0, %lo(var+12)(s1)
; RV32I-WITH-FP-NEXT: sw a0, -68(s0) # 4-byte Folded Spill
-; RV32I-WITH-FP-NEXT: addi s6, s1, %lo(var)
+; RV32I-WITH-FP-NEXT: lui s6, %hi(var)
+; RV32I-WITH-FP-NEXT: addi s6, s6, %lo(var)
; RV32I-WITH-FP-NEXT: lw a0, 16(s6)
; RV32I-WITH-FP-NEXT: sw a0, -72(s0) # 4-byte Folded Spill
; RV32I-WITH-FP-NEXT: lw a0, 20(s6)
@@ -1508,7 +1521,8 @@ define void @caller() nounwind {
; RV32IZCMP-NEXT: sw a0, 84(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, %lo(var+12)(s0)
; RV32IZCMP-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: addi s1, s0, %lo(var)
+; RV32IZCMP-NEXT: lui s1, %hi(var)
+; RV32IZCMP-NEXT: addi s1, s1, %lo(var)
; RV32IZCMP-NEXT: lw a0, 16(s1)
; RV32IZCMP-NEXT: sw a0, 76(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 20(s1)
@@ -1639,7 +1653,8 @@ define void @caller() nounwind {
; RV32IZCMP-WITH-FP-NEXT: sw a0, -64(s0) # 4-byte Folded Spill
; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var+12)(s6)
; RV32IZCMP-WITH-FP-NEXT: sw a0, -68(s0) # 4-byte Folded Spill
-; RV32IZCMP-WITH-FP-NEXT: addi s1, s6, %lo(var)
+; RV32IZCMP-WITH-FP-NEXT: lui s1, %hi(var)
+; RV32IZCMP-WITH-FP-NEXT: addi s1, s1, %lo(var)
; RV32IZCMP-WITH-FP-NEXT: lw a0, 16(s1)
; RV32IZCMP-WITH-FP-NEXT: sw a0, -72(s0) # 4-byte Folded Spill
; RV32IZCMP-WITH-FP-NEXT: lw a0, 20(s1)
@@ -1784,7 +1799,8 @@ define void @caller() nounwind {
; RV64I-NEXT: sd a0, 160(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, %lo(var+12)(s0)
; RV64I-NEXT: sd a0, 152(sp) # 8-byte Folded Spill
-; RV64I-NEXT: addi s5, s0, %lo(var)
+; RV64I-NEXT: lui s5, %hi(var)
+; RV64I-NEXT: addi s5, s5, %lo(var)
; RV64I-NEXT: lw a0, 16(s5)
; RV64I-NEXT: sd a0, 144(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 20(s5)
@@ -1915,9 +1931,10 @@ define void @caller() nounwind {
; RV64I-LP64E-NEXT: sd a1, 232(sp) # 8-byte Folded Spill
; RV64I-LP64E-NEXT: lw a1, %lo(var+8)(a0)
; RV64I-LP64E-NEXT: sd a1, 224(sp) # 8-byte Folded Spill
-; RV64I-LP64E-NEXT: lw a1, %lo(var+12)(a0)
-; RV64I-LP64E-NEXT: sd a1, 216(sp) # 8-byte Folded Spill
-; RV64I-LP64E-NEXT: addi s1, a0, %lo(var)
+; RV64I-LP64E-NEXT: lw a0, %lo(var+12)(a0)
+; RV64I-LP64E-NEXT: sd a0, 216(sp) # 8-byte Folded Spill
+; RV64I-LP64E-NEXT: lui s1, %hi(var)
+; RV64I-LP64E-NEXT: addi s1, s1, %lo(var)
; RV64I-LP64E-NEXT: lw a0, 16(s1)
; RV64I-LP64E-NEXT: sd a0, 208(sp) # 8-byte Folded Spill
; RV64I-LP64E-NEXT: lw a0, 20(s1)
@@ -2070,7 +2087,8 @@ define void @caller() nounwind {
; RV64I-WITH-FP-NEXT: sd a0, -128(s0) # 8-byte Folded Spill
; RV64I-WITH-FP-NEXT: lw a0, %lo(var+12)(s1)
; RV64I-WITH-FP-NEXT: sd a0, -136(s0) # 8-byte Folded Spill
-; RV64I-WITH-FP-NEXT: addi s6, s1, %lo(var)
+; RV64I-WITH-FP-NEXT: lui s6, %hi(var)
+; RV64I-WITH-FP-NEXT: addi s6, s6, %lo(var)
; RV64I-WITH-FP-NEXT: lw a0, 16(s6)
; RV64I-WITH-FP-NEXT: sd a0, -144(s0) # 8-byte Folded Spill
; RV64I-WITH-FP-NEXT: lw a0, 20(s6)
@@ -2203,7 +2221,8 @@ define void @caller() nounwind {
; RV64IZCMP-NEXT: sd a0, 152(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: lw a0, %lo(var+12)(s0)
; RV64IZCMP-NEXT: sd a0, 144(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: addi s1, s0, %lo(var)
+; RV64IZCMP-NEXT: lui s1, %hi(var)
+; RV64IZCMP-NEXT: addi s1, s1, %lo(var)
; RV64IZCMP-NEXT: lw a0, 16(s1)
; RV64IZCMP-NEXT: sd a0, 136(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: lw a0, 20(s1)
@@ -2334,7 +2353,8 @@ define void @caller() nounwind {
; RV64IZCMP-WITH-FP-NEXT: sd a0, -128(s0) # 8-byte Folded Spill
; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var+12)(s6)
; RV64IZCMP-WITH-FP-NEXT: sd a0, -136(s0) # 8-byte Folded Spill
-; RV64IZCMP-WITH-FP-NEXT: addi s1, s6, %lo(var)
+; RV64IZCMP-WITH-FP-NEXT: lui s1, %hi(var)
+; RV64IZCMP-WITH-FP-NEXT: addi s1, s1, %lo(var)
; RV64IZCMP-WITH-FP-NEXT: lw a0, 16(s1)
; RV64IZCMP-WITH-FP-NEXT: sd a0, -144(s0) # 8-byte Folded Spill
; RV64IZCMP-WITH-FP-NEXT: lw a0, 20(s1)
diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
index 549d531e829ea..a90c244437a03 100644
--- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
@@ -383,8 +383,8 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
; RV32I-NEXT: mv a1, s3
; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: lui a0, %hi(.LCPI3_0)
-; RV32I-NEXT: addi s4, a0, %lo(.LCPI3_0)
+; RV32I-NEXT: lui s4, %hi(.LCPI3_0)
+; RV32I-NEXT: addi s4, s4, %lo(.LCPI3_0)
; RV32I-NEXT: neg a0, s2
; RV32I-NEXT: and a0, s2, a0
; RV32I-NEXT: mv a1, s3
@@ -442,9 +442,9 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
; RV32M-LABEL: test_cttz_i64:
; RV32M: # %bb.0:
; RV32M-NEXT: lui a2, 30667
-; RV32M-NEXT: addi a2, a2, 1329
-; RV32M-NEXT: lui a3, %hi(.LCPI3_0)
-; RV32M-NEXT: addi a3, a3, %lo(.LCPI3_0)
+; RV32M-NEXT: addi a3, a2, 1329
+; RV32M-NEXT: lui a2, %hi(.LCPI3_0)
+; RV32M-NEXT: addi a2, a2, %lo(.LCPI3_0)
; RV32M-NEXT: bnez a1, .LBB3_3
; RV32M-NEXT: # %bb.1:
; RV32M-NEXT: li a1, 32
@@ -452,18 +452,18 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
; RV32M-NEXT: .LBB3_2:
; RV32M-NEXT: neg a1, a0
; RV32M-NEXT: and a0, a0, a1
-; RV32M-NEXT: mul a0, a0, a2
+; RV32M-NEXT: mul a0, a0, a3
; RV32M-NEXT: srli a0, a0, 27
-; RV32M-NEXT: add a0, a3, a0
+; RV32M-NEXT: add a0, a2, a0
; RV32M-NEXT: lbu a0, 0(a0)
; RV32M-NEXT: li a1, 0
; RV32M-NEXT: ret
; RV32M-NEXT: .LBB3_3:
; RV32M-NEXT: neg a4, a1
; RV32M-NEXT: and a1, a1, a4
-; RV32M-NEXT: mul a1, a1, a2
+; RV32M-NEXT: mul a1, a1, a3
; RV32M-NEXT: srli a1, a1, 27
-; RV32M-NEXT: add a1, a3, a1
+; RV32M-NEXT: add a1, a2, a1
; RV32M-NEXT: lbu a1, 0(a1)
; RV32M-NEXT: bnez a0, .LBB3_2
; RV32M-NEXT: .LBB3_4:
@@ -814,8 +814,8 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
; RV32I-NEXT: mv a1, s3
; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: lui a0, %hi(.LCPI7_0)
-; RV32I-NEXT: addi s4, a0, %lo(.LCPI7_0)
+; RV32I-NEXT: lui s4, %hi(.LCPI7_0)
+; RV32I-NEXT: addi s4, s4, %lo(.LCPI7_0)
; RV32I-NEXT: neg a0, s1
; RV32I-NEXT: and a0, s1, a0
; RV32I-NEXT: mv a1, s3
diff --git a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
index 9ae30e646fdbf..fe6e20d852d59 100644
--- a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
+++ b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
@@ -48,8 +48,8 @@ define signext i32 @ctz_dereferencing_pointer(ptr %b) nounwind {
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: lui a0, %hi(.LCPI0_0)
-; RV32I-NEXT: addi s3, a0, %lo(.LCPI0_0)
+; RV32I-NEXT: lui s3, %hi(.LCPI0_0)
+; RV32I-NEXT: addi s3, s3, %lo(.LCPI0_0)
; RV32I-NEXT: neg a0, s4
; RV32I-NEXT: and a0, s4, a0
; RV32I-NEXT: mv a1, s1
@@ -511,8 +511,8 @@ define signext i32 @ctz4(i64 %b) nounwind {
; RV32I-NEXT: mv a1, s3
; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: lui a0, %hi(.LCPI6_0)
-; RV32I-NEXT: addi s4, a0, %lo(.LCPI6_0)
+; RV32I-NEXT: lui s4, %hi(.LCPI6_0)
+; RV32I-NEXT: addi s4, s4, %lo(.LCPI6_0)
; RV32I-NEXT: neg a0, s2
; RV32I-NEXT: and a0, s2, a0
; RV32I-NEXT: mv a1, s3
diff --git a/llvm/test/CodeGen/RISCV/double-mem.ll b/llvm/test/CodeGen/RISCV/double-mem.ll
index 38cb52b6f4b30..abd0b87148036 100644
--- a/llvm/test/CodeGen/RISCV/double-mem.ll
+++ b/llvm/test/CodeGen/RISCV/double-mem.ll
@@ -85,10 +85,10 @@ define dso_local double @fld_fsd_global(double %a, double %b) nounwind {
; CHECKIFD-NEXT: fadd.d fa0, fa0, fa1
; CHECKIFD-NEXT: lui a0, %hi(G)
; CHECKIFD-NEXT: fld fa5, %lo(G)(a0)
-; CHECKIFD-NEXT: addi a1, a0, %lo(G)
+; CHECKIFD-NEXT: lui a1, %hi(G+72)
; CHECKIFD-NEXT: fsd fa0, %lo(G)(a0)
-; CHECKIFD-NEXT: fld fa5, 72(a1)
-; CHECKIFD-NEXT: fsd fa0, 72(a1)
+; CHECKIFD-NEXT: fld fa5, %lo(G+72)(a1)
+; CHECKIFD-NEXT: fsd fa0, %lo(G+72)(a1)
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fld_fsd_global:
@@ -97,7 +97,8 @@ define dso_local double @fld_fsd_global(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: lui a2, %hi(G)
; RV32IZFINXZDINX-NEXT: lw a4, %lo(G)(a2)
; RV32IZFINXZDINX-NEXT: lw a5, %lo(G+4)(a2)
-; RV32IZFINXZDINX-NEXT: addi a3, a2, %lo(G)
+; RV32IZFINXZDINX-NEXT: lui a3, %hi(G)
+; RV32IZFINXZDINX-NEXT: addi a3, a3, %lo(G)
; RV32IZFINXZDINX-NEXT: sw a0, %lo(G)(a2)
; RV32IZFINXZDINX-NEXT: sw a1, %lo(G+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a4, 72(a3)
@@ -111,10 +112,10 @@ define dso_local double @fld_fsd_global(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: fadd.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: lui a1, %hi(G)
; RV64IZFINXZDINX-NEXT: ld zero, %lo(G)(a1)
-; RV64IZFINXZDINX-NEXT: addi a2, a1, %lo(G)
+; RV64IZFINXZDINX-NEXT: lui a2, %hi(G+72)
; RV64IZFINXZDINX-NEXT: sd a0, %lo(G)(a1)
-; RV64IZFINXZDINX-NEXT: ld zero, 72(a2)
-; RV64IZFINXZDINX-NEXT: sd a0, 72(a2)
+; RV64IZFINXZDINX-NEXT: ld zero, %lo(G+72)(a2)
+; RV64IZFINXZDINX-NEXT: sd a0, %lo(G+72)(a2)
; RV64IZFINXZDINX-NEXT: ret
; Use %a and %b in an FP op to ensure floating point registers are used, even
; for the soft float ABI
diff --git a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
index eb6ac985287a1..478d2eae9dca2 100644
--- a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
@@ -24,31 +24,31 @@ define void @_Z3foov() {
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_49)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_49)
; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_48)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_48)
-; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v10, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs1r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_46)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_46)
-; CHECK-NEXT: vle16.v v12, (a0)
+; CHECK-NEXT: vle16.v v10, (a0)
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_45)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_45)
-; CHECK-NEXT: vle16.v v14, (a0)
+; CHECK-NEXT: vle16.v v12, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: vs2r.v v14, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vs2r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_40)
diff --git a/llvm/test/CodeGen/RISCV/float-mem.ll b/llvm/test/CodeGen/RISCV/float-mem.ll
index 3779d39a753e1..5cf56d283d1ea 100644
--- a/llvm/test/CodeGen/RISCV/float-mem.ll
+++ b/llvm/test/CodeGen/RISCV/float-mem.ll
@@ -65,10 +65,10 @@ define dso_local float @flw_fsw_global(float %a, float %b) nounwind {
; CHECKIF-NEXT: fadd.s fa0, fa0, fa1
; CHECKIF-NEXT: lui a0, %hi(G)
; CHECKIF-NEXT: flw fa5, %lo(G)(a0)
-; CHECKIF-NEXT: addi a1, a0, %lo(G)
+; CHECKIF-NEXT: lui a1, %hi(G+36)
; CHECKIF-NEXT: fsw fa0, %lo(G)(a0)
-; CHECKIF-NEXT: flw fa5, 36(a1)
-; CHECKIF-NEXT: fsw fa0, 36(a1)
+; CHECKIF-NEXT: flw fa5, %lo(G+36)(a1)
+; CHECKIF-NEXT: fsw fa0, %lo(G+36)(a1)
; CHECKIF-NEXT: ret
;
; CHECKIZFINX-LABEL: flw_fsw_global:
@@ -76,10 +76,10 @@ define dso_local float @flw_fsw_global(float %a, float %b) nounwind {
; CHECKIZFINX-NEXT: fadd.s a0, a0, a1
; CHECKIZFINX-NEXT: lui a1, %hi(G)
; CHECKIZFINX-NEXT: lw zero, %lo(G)(a1)
-; CHECKIZFINX-NEXT: addi a2, a1, %lo(G)
+; CHECKIZFINX-NEXT: lui a2, %hi(G+36)
; CHECKIZFINX-NEXT: sw a0, %lo(G)(a1)
-; CHECKIZFINX-NEXT: lw zero, 36(a2)
-; CHECKIZFINX-NEXT: sw a0, 36(a2)
+; CHECKIZFINX-NEXT: lw zero, %lo(G+36)(a2)
+; CHECKIZFINX-NEXT: sw a0, %lo(G+36)(a2)
; CHECKIZFINX-NEXT: ret
%1 = fadd float %a, %b
%2 = load volatile float, ptr @G
diff --git a/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll b/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
index 3c2e84689c979..39b8b4f31b868 100644
--- a/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
+++ b/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
@@ -59,10 +59,10 @@ entry:
define dso_local i64 @load_g_1() nounwind {
; RV32I-LABEL: load_g_1:
; RV32I: # %bb.0: # %entry
-; RV32I-NEXT: lui a1, %hi(g_1)
-; RV32I-NEXT: lw a0, %lo(g_1)(a1)
-; RV32I-NEXT: addi a1, a1, %lo(g_1)
-; RV32I-NEXT: lw a1, 4(a1)
+; RV32I-NEXT: lui a0, %hi(g_1)
+; RV32I-NEXT: lw a0, %lo(g_1)(a0)
+; RV32I-NEXT: lui a1, %hi(g_1+4)
+; RV32I-NEXT: lw a1, %lo(g_1+4)(a1)
; RV32I-NEXT: ret
;
; RV32I-MEDIUM-LABEL: load_g_1:
@@ -94,10 +94,10 @@ entry:
define dso_local i64 @load_g_2() nounwind {
; RV32I-LABEL: load_g_2:
; RV32I: # %bb.0: # %entry
-; RV32I-NEXT: lui a1, %hi(g_2)
-; RV32I-NEXT: lw a0, %lo(g_2)(a1)
-; RV32I-NEXT: addi a1, a1, %lo(g_2)
-; RV32I-NEXT: lw a1, 4(a1)
+; RV32I-NEXT: lui a0, %hi(g_2)
+; RV32I-NEXT: lw a0, %lo(g_2)(a0)
+; RV32I-NEXT: lui a1, %hi(g_2+4)
+; RV32I-NEXT: lw a1, %lo(g_2+4)(a1)
; RV32I-NEXT: ret
;
; RV32I-MEDIUM-LABEL: load_g_2:
@@ -129,10 +129,10 @@ entry:
define dso_local i64 @load_g_4() nounwind {
; RV32I-LABEL: load_g_4:
; RV32I: # %bb.0: # %entry
-; RV32I-NEXT: lui a1, %hi(g_4)
-; RV32I-NEXT: lw a0, %lo(g_4)(a1)
-; RV32I-NEXT: addi a1, a1, %lo(g_4)
-; RV32I-NEXT: lw a1, 4(a1)
+; RV32I-NEXT: lui a0, %hi(g_4)
+; RV32I-NEXT: lw a0, %lo(g_4)(a0)
+; RV32I-NEXT: lui a1, %hi(g_4+4)
+; RV32I-NEXT: lw a1, %lo(g_4+4)(a1)
; RV32I-NEXT: ret
;
; RV32I-MEDIUM-LABEL: load_g_4:
@@ -232,10 +232,10 @@ entry:
define dso_local void @store_g_4() nounwind {
; RV32I-LABEL: store_g_4:
; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: lui a0, %hi(g_4+4)
+; RV32I-NEXT: sw zero, %lo(g_4+4)(a0)
; RV32I-NEXT: lui a0, %hi(g_4)
; RV32I-NEXT: sw zero, %lo(g_4)(a0)
-; RV32I-NEXT: addi a0, a0, %lo(g_4)
-; RV32I-NEXT: sw zero, 4(a0)
; RV32I-NEXT: ret
;
; RV32I-MEDIUM-LABEL: store_g_4:
@@ -389,8 +389,8 @@ define dso_local i32 @load_ga() local_unnamed_addr #0 {
define dso_local i64 @load_ga_8() nounwind {
; RV32I-LABEL: load_ga_8:
; RV32I: # %bb.0: # %entry
-; RV32I-NEXT: lui a0, %hi(ga_8)
-; RV32I-NEXT: addi a1, a0, %lo(ga_8)
+; RV32I-NEXT: lui a1, %hi(ga_8)
+; RV32I-NEXT: addi a1, a1, %lo(ga_8)
; RV32I-NEXT: lw a0, 8(a1)
; RV32I-NEXT: lw a1, 12(a1)
; RV32I-NEXT: ret
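
(The g_1+4 style relocations above come from folding a small constant offset
into both halves of the address: rather than addi'ing %lo(g_1) onto the base
and using a register offset of 4, the base-offset merge rewrites the pair to
%hi(g_1+4)/%lo(g_1+4) so the load addresses the second word directly. A
minimal sketch of that rewrite on the two global-address MachineOperands,
illustrative rather than the in-tree RISCVMergeBaseOffset code:)

// Fold a known constant Offset into an existing %hi/%lo operand pair.
// HiOp/LoOp are the global-address operands of the LUI and of the memory
// instruction; adjusting the relocation addend on both keeps the linker's
// hi/lo split of (sym + Offset) consistent.
void foldOffsetIntoHiLo(MachineOperand &HiOp, MachineOperand &LoOp,
                        int64_t Offset) {
  HiOp.setOffset(HiOp.getOffset() + Offset);
  LoOp.setOffset(LoOp.getOffset() + Offset);
}
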
diff --git a/llvm/test/CodeGen/RISCV/global-merge-offset.ll b/llvm/test/CodeGen/RISCV/global-merge-offset.ll
index 13afcba181719..2f65ff7e0de82 100644
--- a/llvm/test/CodeGen/RISCV/global-merge-offset.ll
+++ b/llvm/test/CodeGen/RISCV/global-merge-offset.ll
@@ -22,7 +22,8 @@ define void @f1(i32 %a) nounwind {
; CHECK-LABEL: f1:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.L_MergedGlobals)
-; CHECK-NEXT: addi a2, a1, %lo(.L_MergedGlobals)
+; CHECK-NEXT: lui a2, %hi(.L_MergedGlobals)
+; CHECK-NEXT: addi a2, a2, %lo(.L_MergedGlobals)
; CHECK-NEXT: sw a0, 2044(a2)
; CHECK-NEXT: sw a0, 404(a2)
; CHECK-NEXT: sw a0, %lo(.L_MergedGlobals)(a1)
@@ -32,9 +33,9 @@ define void @f1(i32 %a) nounwind {
; CHECK-TOOBIG: # %bb.0:
; CHECK-TOOBIG-NEXT: lui a1, %hi(ga1+1640)
; CHECK-TOOBIG-NEXT: lui a2, %hi(.L_MergedGlobals)
-; CHECK-TOOBIG-NEXT: addi a3, a2, %lo(.L_MergedGlobals)
+; CHECK-TOOBIG-NEXT: lui a3, %hi(.L_MergedGlobals+408)
; CHECK-TOOBIG-NEXT: sw a0, %lo(ga1+1640)(a1)
-; CHECK-TOOBIG-NEXT: sw a0, 408(a3)
+; CHECK-TOOBIG-NEXT: sw a0, %lo(.L_MergedGlobals+408)(a3)
; CHECK-TOOBIG-NEXT: sw a0, %lo(.L_MergedGlobals)(a2)
; CHECK-TOOBIG-NEXT: ret
%ga1_end = getelementptr inbounds [410 x i32], ptr @ga1, i32 0, i64 410
diff --git a/llvm/test/CodeGen/RISCV/global-merge.ll b/llvm/test/CodeGen/RISCV/global-merge.ll
index 20379ee2e7dac..6a88f62b072a6 100644
--- a/llvm/test/CodeGen/RISCV/global-merge.ll
+++ b/llvm/test/CodeGen/RISCV/global-merge.ll
@@ -19,6 +19,7 @@ define void @f1(i32 %a) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.L_MergedGlobals)
; CHECK-NEXT: sw a0, %lo(.L_MergedGlobals)(a1)
+; CHECK-NEXT: lui a1, %hi(.L_MergedGlobals)
; CHECK-NEXT: addi a1, a1, %lo(.L_MergedGlobals)
; CHECK-NEXT: sw a0, 4(a1)
; CHECK-NEXT: sw a0, 8(a1)
diff --git a/llvm/test/CodeGen/RISCV/half-mem.ll b/llvm/test/CodeGen/RISCV/half-mem.ll
index 5b6a94a83f94b..b0590d6520d69 100644
--- a/llvm/test/CodeGen/RISCV/half-mem.ll
+++ b/llvm/test/CodeGen/RISCV/half-mem.ll
@@ -113,10 +113,10 @@ define half @flh_fsh_global(half %a, half %b) nounwind {
; CHECKIZFH-NEXT: fadd.h fa0, fa0, fa1
; CHECKIZFH-NEXT: lui a0, %hi(G)
; CHECKIZFH-NEXT: flh fa5, %lo(G)(a0)
-; CHECKIZFH-NEXT: addi a1, a0, %lo(G)
+; CHECKIZFH-NEXT: lui a1, %hi(G+18)
; CHECKIZFH-NEXT: fsh fa0, %lo(G)(a0)
-; CHECKIZFH-NEXT: flh fa5, 18(a1)
-; CHECKIZFH-NEXT: fsh fa0, 18(a1)
+; CHECKIZFH-NEXT: flh fa5, %lo(G+18)(a1)
+; CHECKIZFH-NEXT: fsh fa0, %lo(G+18)(a1)
; CHECKIZFH-NEXT: ret
;
; CHECKIZHINX-LABEL: flh_fsh_global:
@@ -124,10 +124,10 @@ define half @flh_fsh_global(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: fadd.h a0, a0, a1
; CHECKIZHINX-NEXT: lui a1, %hi(G)
; CHECKIZHINX-NEXT: lh zero, %lo(G)(a1)
-; CHECKIZHINX-NEXT: addi a2, a1, %lo(G)
+; CHECKIZHINX-NEXT: lui a2, %hi(G+18)
; CHECKIZHINX-NEXT: sh a0, %lo(G)(a1)
-; CHECKIZHINX-NEXT: lh zero, 18(a2)
-; CHECKIZHINX-NEXT: sh a0, 18(a2)
+; CHECKIZHINX-NEXT: lh zero, %lo(G+18)(a2)
+; CHECKIZHINX-NEXT: sh a0, %lo(G+18)(a2)
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: flh_fsh_global:
@@ -138,10 +138,10 @@ define half @flh_fsh_global(half %a, half %b) nounwind {
; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5
; CHECKIZFHMIN-NEXT: lui a0, %hi(G)
; CHECKIZFHMIN-NEXT: flh fa5, %lo(G)(a0)
-; CHECKIZFHMIN-NEXT: addi a1, a0, %lo(G)
+; CHECKIZFHMIN-NEXT: lui a1, %hi(G+18)
; CHECKIZFHMIN-NEXT: fsh fa0, %lo(G)(a0)
-; CHECKIZFHMIN-NEXT: flh fa5, 18(a1)
-; CHECKIZFHMIN-NEXT: fsh fa0, 18(a1)
+; CHECKIZFHMIN-NEXT: flh fa5, %lo(G+18)(a1)
+; CHECKIZFHMIN-NEXT: fsh fa0, %lo(G+18)(a1)
; CHECKIZFHMIN-NEXT: ret
;
; CHECKIZHINXMIN-LABEL: flh_fsh_global:
@@ -152,10 +152,10 @@ define half @flh_fsh_global(half %a, half %b) nounwind {
; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0
; CHECKIZHINXMIN-NEXT: lui a1, %hi(G)
; CHECKIZHINXMIN-NEXT: lh zero, %lo(G)(a1)
-; CHECKIZHINXMIN-NEXT: addi a2, a1, %lo(G)
+; CHECKIZHINXMIN-NEXT: lui a2, %hi(G+18)
; CHECKIZHINXMIN-NEXT: sh a0, %lo(G)(a1)
-; CHECKIZHINXMIN-NEXT: lh zero, 18(a2)
-; CHECKIZHINXMIN-NEXT: sh a0, 18(a2)
+; CHECKIZHINXMIN-NEXT: lh zero, %lo(G+18)(a2)
+; CHECKIZHINXMIN-NEXT: sh a0, %lo(G+18)(a2)
; CHECKIZHINXMIN-NEXT: ret
%1 = fadd half %a, %b
%2 = load volatile half, ptr @G
diff --git a/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll b/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
index 5f9866f08c821..ead3356db3fe4 100644
--- a/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
+++ b/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
@@ -306,7 +306,8 @@ define void @self_store() {
; RV32-LABEL: self_store:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, %hi(f)
-; RV32-NEXT: addi a1, a0, %lo(f)
+; RV32-NEXT: lui a1, %hi(f)
+; RV32-NEXT: addi a1, a1, %lo(f)
; RV32-NEXT: sw a1, %lo(f+4)(a0)
; RV32-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/mem.ll b/llvm/test/CodeGen/RISCV/mem.ll
index a9cb80cb66349..565688722ee52 100644
--- a/llvm/test/CodeGen/RISCV/mem.ll
+++ b/llvm/test/CodeGen/RISCV/mem.ll
@@ -170,10 +170,10 @@ define dso_local i32 @lw_sw_global(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: lui a2, %hi(G)
; RV32I-NEXT: lw a1, %lo(G)(a2)
-; RV32I-NEXT: addi a3, a2, %lo(G)
+; RV32I-NEXT: lui a3, %hi(G+36)
; RV32I-NEXT: sw a0, %lo(G)(a2)
-; RV32I-NEXT: lw zero, 36(a3)
-; RV32I-NEXT: sw a0, 36(a3)
+; RV32I-NEXT: lw zero, %lo(G+36)(a3)
+; RV32I-NEXT: sw a0, %lo(G+36)(a3)
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: ret
%1 = load volatile i32, ptr @G
diff --git a/llvm/test/CodeGen/RISCV/mem64.ll b/llvm/test/CodeGen/RISCV/mem64.ll
index 248964146325a..167ca1127ec33 100644
--- a/llvm/test/CodeGen/RISCV/mem64.ll
+++ b/llvm/test/CodeGen/RISCV/mem64.ll
@@ -215,10 +215,10 @@ define dso_local i64 @ld_sd_global(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: lui a2, %hi(G)
; RV64I-NEXT: ld a1, %lo(G)(a2)
-; RV64I-NEXT: addi a3, a2, %lo(G)
+; RV64I-NEXT: lui a3, %hi(G+72)
; RV64I-NEXT: sd a0, %lo(G)(a2)
-; RV64I-NEXT: ld zero, 72(a3)
-; RV64I-NEXT: sd a0, 72(a3)
+; RV64I-NEXT: ld zero, %lo(G+72)(a3)
+; RV64I-NEXT: sd a0, %lo(G+72)(a3)
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
%1 = load volatile i64, ptr @G
diff --git a/llvm/test/CodeGen/RISCV/memcpy.ll b/llvm/test/CodeGen/RISCV/memcpy.ll
index 02f582339d0b7..b5a565762cafd 100644
--- a/llvm/test/CodeGen/RISCV/memcpy.ll
+++ b/llvm/test/CodeGen/RISCV/memcpy.ll
@@ -24,60 +24,66 @@ define i32 @t0() {
; RV32-LABEL: t0:
; RV32: # %bb.0: # %entry
; RV32-NEXT: lui a0, %hi(src)
-; RV32-NEXT: lw a1, %lo(src)(a0)
-; RV32-NEXT: lui a2, %hi(dst)
-; RV32-NEXT: sw a1, %lo(dst)(a2)
; RV32-NEXT: addi a0, a0, %lo(src)
; RV32-NEXT: lbu a1, 10(a0)
-; RV32-NEXT: lh a3, 8(a0)
-; RV32-NEXT: lw a0, 4(a0)
+; RV32-NEXT: lui a2, %hi(dst)
; RV32-NEXT: addi a2, a2, %lo(dst)
; RV32-NEXT: sb a1, 10(a2)
-; RV32-NEXT: sh a3, 8(a2)
+; RV32-NEXT: lh a1, 8(a0)
+; RV32-NEXT: lw a0, 4(a0)
+; RV32-NEXT: lui a3, %hi(src)
+; RV32-NEXT: lw a3, %lo(src)(a3)
+; RV32-NEXT: sh a1, 8(a2)
; RV32-NEXT: sw a0, 4(a2)
+; RV32-NEXT: lui a0, %hi(dst)
+; RV32-NEXT: sw a3, %lo(dst)(a0)
; RV32-NEXT: li a0, 0
; RV32-NEXT: ret
;
; RV64-LABEL: t0:
; RV64: # %bb.0: # %entry
; RV64-NEXT: lui a0, %hi(src)
-; RV64-NEXT: ld a1, %lo(src)(a0)
-; RV64-NEXT: lui a2, %hi(dst)
; RV64-NEXT: addi a0, a0, %lo(src)
-; RV64-NEXT: lbu a3, 10(a0)
+; RV64-NEXT: lbu a1, 10(a0)
+; RV64-NEXT: lui a2, %hi(dst)
+; RV64-NEXT: addi a2, a2, %lo(dst)
; RV64-NEXT: lh a0, 8(a0)
-; RV64-NEXT: sd a1, %lo(dst)(a2)
-; RV64-NEXT: addi a1, a2, %lo(dst)
-; RV64-NEXT: sb a3, 10(a1)
-; RV64-NEXT: sh a0, 8(a1)
+; RV64-NEXT: lui a3, %hi(src)
+; RV64-NEXT: ld a3, %lo(src)(a3)
+; RV64-NEXT: sb a1, 10(a2)
+; RV64-NEXT: sh a0, 8(a2)
+; RV64-NEXT: lui a0, %hi(dst)
+; RV64-NEXT: sd a3, %lo(dst)(a0)
; RV64-NEXT: li a0, 0
; RV64-NEXT: ret
;
; RV32-FAST-LABEL: t0:
; RV32-FAST: # %bb.0: # %entry
; RV32-FAST-NEXT: lui a0, %hi(src)
-; RV32-FAST-NEXT: lw a1, %lo(src)(a0)
-; RV32-FAST-NEXT: lui a2, %hi(dst)
; RV32-FAST-NEXT: addi a0, a0, %lo(src)
-; RV32-FAST-NEXT: lw a3, 7(a0)
+; RV32-FAST-NEXT: lw a1, 7(a0)
+; RV32-FAST-NEXT: lui a2, %hi(dst)
+; RV32-FAST-NEXT: addi a2, a2, %lo(dst)
; RV32-FAST-NEXT: lw a0, 4(a0)
-; RV32-FAST-NEXT: sw a1, %lo(dst)(a2)
-; RV32-FAST-NEXT: addi a1, a2, %lo(dst)
-; RV32-FAST-NEXT: sw a3, 7(a1)
-; RV32-FAST-NEXT: sw a0, 4(a1)
+; RV32-FAST-NEXT: lui a3, %hi(src)
+; RV32-FAST-NEXT: lw a3, %lo(src)(a3)
+; RV32-FAST-NEXT: sw a1, 7(a2)
+; RV32-FAST-NEXT: sw a0, 4(a2)
+; RV32-FAST-NEXT: lui a0, %hi(dst)
+; RV32-FAST-NEXT: sw a3, %lo(dst)(a0)
; RV32-FAST-NEXT: li a0, 0
; RV32-FAST-NEXT: ret
;
; RV64-FAST-LABEL: t0:
; RV64-FAST: # %bb.0: # %entry
-; RV64-FAST-NEXT: lui a0, %hi(src)
-; RV64-FAST-NEXT: ld a1, %lo(src)(a0)
-; RV64-FAST-NEXT: addi a0, a0, %lo(src)
-; RV64-FAST-NEXT: lw a0, 7(a0)
-; RV64-FAST-NEXT: lui a2, %hi(dst)
-; RV64-FAST-NEXT: sd a1, %lo(dst)(a2)
-; RV64-FAST-NEXT: addi a1, a2, %lo(dst)
-; RV64-FAST-NEXT: sw a0, 7(a1)
+; RV64-FAST-NEXT: lui a0, %hi(src+7)
+; RV64-FAST-NEXT: lw a0, %lo(src+7)(a0)
+; RV64-FAST-NEXT: lui a1, %hi(src)
+; RV64-FAST-NEXT: ld a1, %lo(src)(a1)
+; RV64-FAST-NEXT: lui a2, %hi(dst+7)
+; RV64-FAST-NEXT: sw a0, %lo(dst+7)(a2)
+; RV64-FAST-NEXT: lui a0, %hi(dst)
+; RV64-FAST-NEXT: sd a1, %lo(dst)(a0)
; RV64-FAST-NEXT: li a0, 0
; RV64-FAST-NEXT: ret
entry:
@@ -131,15 +137,16 @@ define void @t1(ptr nocapture %C) nounwind {
; RV64-FAST-LABEL: t1:
; RV64-FAST: # %bb.0: # %entry
; RV64-FAST-NEXT: lui a1, %hi(.L.str1)
-; RV64-FAST-NEXT: ld a2, %lo(.L.str1)(a1)
; RV64-FAST-NEXT: addi a1, a1, %lo(.L.str1)
-; RV64-FAST-NEXT: ld a3, 23(a1)
-; RV64-FAST-NEXT: ld a4, 16(a1)
+; RV64-FAST-NEXT: ld a2, 23(a1)
+; RV64-FAST-NEXT: ld a3, 16(a1)
; RV64-FAST-NEXT: ld a1, 8(a1)
-; RV64-FAST-NEXT: sd a2, 0(a0)
-; RV64-FAST-NEXT: sd a3, 23(a0)
-; RV64-FAST-NEXT: sd a4, 16(a0)
+; RV64-FAST-NEXT: lui a4, %hi(.L.str1)
+; RV64-FAST-NEXT: ld a4, %lo(.L.str1)(a4)
+; RV64-FAST-NEXT: sd a2, 23(a0)
+; RV64-FAST-NEXT: sd a3, 16(a0)
; RV64-FAST-NEXT: sd a1, 8(a0)
+; RV64-FAST-NEXT: sd a4, 0(a0)
; RV64-FAST-NEXT: ret
entry:
tail call void @llvm.memcpy.p0.p0.i64(ptr %C, ptr @.str1, i64 31, i1 false)
@@ -164,18 +171,19 @@ define void @t2(ptr nocapture %C) nounwind {
; RV64-FAST-LABEL: t2:
; RV64-FAST: # %bb.0: # %entry
; RV64-FAST-NEXT: lui a1, %hi(.L.str2)
-; RV64-FAST-NEXT: ld a2, %lo(.L.str2)(a1)
-; RV64-FAST-NEXT: sd a2, 0(a0)
-; RV64-FAST-NEXT: lui a2, 1156
-; RV64-FAST-NEXT: addi a2, a2, 332
; RV64-FAST-NEXT: addi a1, a1, %lo(.L.str2)
-; RV64-FAST-NEXT: ld a3, 24(a1)
-; RV64-FAST-NEXT: ld a4, 16(a1)
+; RV64-FAST-NEXT: ld a2, 24(a1)
+; RV64-FAST-NEXT: ld a3, 16(a1)
; RV64-FAST-NEXT: ld a1, 8(a1)
-; RV64-FAST-NEXT: sw a2, 32(a0)
-; RV64-FAST-NEXT: sd a3, 24(a0)
-; RV64-FAST-NEXT: sd a4, 16(a0)
+; RV64-FAST-NEXT: lui a4, %hi(.L.str2)
+; RV64-FAST-NEXT: ld a4, %lo(.L.str2)(a4)
+; RV64-FAST-NEXT: sd a2, 24(a0)
+; RV64-FAST-NEXT: sd a3, 16(a0)
; RV64-FAST-NEXT: sd a1, 8(a0)
+; RV64-FAST-NEXT: sd a4, 0(a0)
+; RV64-FAST-NEXT: lui a1, 1156
+; RV64-FAST-NEXT: addi a1, a1, 332
+; RV64-FAST-NEXT: sw a1, 32(a0)
; RV64-FAST-NEXT: ret
entry:
tail call void @llvm.memcpy.p0.p0.i64(ptr %C, ptr @.str2, i64 36, i1 false)
@@ -222,13 +230,14 @@ define void @t3(ptr nocapture %C) nounwind {
; RV64-FAST-LABEL: t3:
; RV64-FAST: # %bb.0: # %entry
; RV64-FAST-NEXT: lui a1, %hi(.L.str3)
-; RV64-FAST-NEXT: ld a2, %lo(.L.str3)(a1)
; RV64-FAST-NEXT: addi a1, a1, %lo(.L.str3)
-; RV64-FAST-NEXT: ld a3, 16(a1)
+; RV64-FAST-NEXT: ld a2, 16(a1)
; RV64-FAST-NEXT: ld a1, 8(a1)
-; RV64-FAST-NEXT: sd a2, 0(a0)
-; RV64-FAST-NEXT: sd a3, 16(a0)
+; RV64-FAST-NEXT: lui a3, %hi(.L.str3)
+; RV64-FAST-NEXT: ld a3, %lo(.L.str3)(a3)
+; RV64-FAST-NEXT: sd a2, 16(a0)
; RV64-FAST-NEXT: sd a1, 8(a0)
+; RV64-FAST-NEXT: sd a3, 0(a0)
; RV64-FAST-NEXT: ret
entry:
tail call void @llvm.memcpy.p0.p0.i64(ptr %C, ptr @.str3, i64 24, i1 false)
@@ -270,14 +279,14 @@ define void @t4(ptr nocapture %C) nounwind {
;
; RV64-FAST-LABEL: t4:
; RV64-FAST: # %bb.0: # %entry
-; RV64-FAST-NEXT: lui a1, %hi(.L.str4)
-; RV64-FAST-NEXT: ld a2, %lo(.L.str4)(a1)
-; RV64-FAST-NEXT: addi a1, a1, %lo(.L.str4)
-; RV64-FAST-NEXT: ld a1, 8(a1)
+; RV64-FAST-NEXT: lui a1, %hi(.L.str4+8)
+; RV64-FAST-NEXT: ld a1, %lo(.L.str4+8)(a1)
+; RV64-FAST-NEXT: lui a2, %hi(.L.str4)
+; RV64-FAST-NEXT: ld a2, %lo(.L.str4)(a2)
; RV64-FAST-NEXT: li a3, 32
; RV64-FAST-NEXT: sh a3, 16(a0)
-; RV64-FAST-NEXT: sd a2, 0(a0)
; RV64-FAST-NEXT: sd a1, 8(a0)
+; RV64-FAST-NEXT: sd a2, 0(a0)
; RV64-FAST-NEXT: ret
entry:
tail call void @llvm.memcpy.p0.p0.i64(ptr %C, ptr @.str4, i64 18, i1 false)
@@ -390,13 +399,13 @@ define void @t6() nounwind {
;
; RV64-FAST-LABEL: t6:
; RV64-FAST: # %bb.0: # %entry
-; RV64-FAST-NEXT: lui a0, %hi(.L.str6)
-; RV64-FAST-NEXT: ld a1, %lo(.L.str6)(a0)
-; RV64-FAST-NEXT: addi a0, a0, %lo(.L.str6)
-; RV64-FAST-NEXT: ld a0, 6(a0)
+; RV64-FAST-NEXT: lui a0, %hi(.L.str6+6)
+; RV64-FAST-NEXT: ld a0, %lo(.L.str6+6)(a0)
+; RV64-FAST-NEXT: lui a1, %hi(.L.str6)
+; RV64-FAST-NEXT: ld a1, %lo(.L.str6)(a1)
; RV64-FAST-NEXT: lui a2, %hi(spool.splbuf)
-; RV64-FAST-NEXT: sd a1, %lo(spool.splbuf)(a2)
; RV64-FAST-NEXT: sd a0, %lo(spool.splbuf+6)(a2)
+; RV64-FAST-NEXT: sd a1, %lo(spool.splbuf)(a2)
; RV64-FAST-NEXT: ret
entry:
call void @llvm.memcpy.p0.p0.i64(ptr @spool.splbuf, ptr @.str6, i64 14, i1 false)
diff --git a/llvm/test/CodeGen/RISCV/push-pop-popret.ll b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
index 7548faaae61f4..c65c7a815153b 100644
--- a/llvm/test/CodeGen/RISCV/push-pop-popret.ll
+++ b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
@@ -727,11 +727,11 @@ define i32 @nocompress(i32 signext %size) {
; RV32IZCMP-NEXT: lw s4, %lo(var+4)(s1)
; RV32IZCMP-NEXT: lw s5, %lo(var+8)(s1)
; RV32IZCMP-NEXT: lw s6, %lo(var+12)(s1)
-; RV32IZCMP-NEXT: addi s7, s1, %lo(var)
-; RV32IZCMP-NEXT: lw s8, 16(s7)
+; RV32IZCMP-NEXT: lui s7, %hi(var+16)
+; RV32IZCMP-NEXT: lw s8, %lo(var+16)(s7)
; RV32IZCMP-NEXT: mv a0, s2
; RV32IZCMP-NEXT: call callee_void
-; RV32IZCMP-NEXT: sw s8, 16(s7)
+; RV32IZCMP-NEXT: sw s8, %lo(var+16)(s7)
; RV32IZCMP-NEXT: sw s6, %lo(var+12)(s1)
; RV32IZCMP-NEXT: sw s5, %lo(var+8)(s1)
; RV32IZCMP-NEXT: sw s4, %lo(var+4)(s1)
@@ -768,11 +768,11 @@ define i32 @nocompress(i32 signext %size) {
; RV64IZCMP-NEXT: lw s4, %lo(var+4)(s1)
; RV64IZCMP-NEXT: lw s5, %lo(var+8)(s1)
; RV64IZCMP-NEXT: lw s6, %lo(var+12)(s1)
-; RV64IZCMP-NEXT: addi s7, s1, %lo(var)
-; RV64IZCMP-NEXT: lw s8, 16(s7)
+; RV64IZCMP-NEXT: lui s7, %hi(var+16)
+; RV64IZCMP-NEXT: lw s8, %lo(var+16)(s7)
; RV64IZCMP-NEXT: mv a0, s2
; RV64IZCMP-NEXT: call callee_void
-; RV64IZCMP-NEXT: sw s8, 16(s7)
+; RV64IZCMP-NEXT: sw s8, %lo(var+16)(s7)
; RV64IZCMP-NEXT: sw s6, %lo(var+12)(s1)
; RV64IZCMP-NEXT: sw s5, %lo(var+8)(s1)
; RV64IZCMP-NEXT: sw s4, %lo(var+4)(s1)
@@ -807,11 +807,11 @@ define i32 @nocompress(i32 signext %size) {
; RV32IZCMP-SR-NEXT: lw s4, %lo(var+4)(s1)
; RV32IZCMP-SR-NEXT: lw s5, %lo(var+8)(s1)
; RV32IZCMP-SR-NEXT: lw s6, %lo(var+12)(s1)
-; RV32IZCMP-SR-NEXT: addi s7, s1, %lo(var)
-; RV32IZCMP-SR-NEXT: lw s8, 16(s7)
+; RV32IZCMP-SR-NEXT: lui s7, %hi(var+16)
+; RV32IZCMP-SR-NEXT: lw s8, %lo(var+16)(s7)
; RV32IZCMP-SR-NEXT: mv a0, s2
; RV32IZCMP-SR-NEXT: call callee_void
-; RV32IZCMP-SR-NEXT: sw s8, 16(s7)
+; RV32IZCMP-SR-NEXT: sw s8, %lo(var+16)(s7)
; RV32IZCMP-SR-NEXT: sw s6, %lo(var+12)(s1)
; RV32IZCMP-SR-NEXT: sw s5, %lo(var+8)(s1)
; RV32IZCMP-SR-NEXT: sw s4, %lo(var+4)(s1)
@@ -848,11 +848,11 @@ define i32 @nocompress(i32 signext %size) {
; RV64IZCMP-SR-NEXT: lw s4, %lo(var+4)(s1)
; RV64IZCMP-SR-NEXT: lw s5, %lo(var+8)(s1)
; RV64IZCMP-SR-NEXT: lw s6, %lo(var+12)(s1)
-; RV64IZCMP-SR-NEXT: addi s7, s1, %lo(var)
-; RV64IZCMP-SR-NEXT: lw s8, 16(s7)
+; RV64IZCMP-SR-NEXT: lui s7, %hi(var+16)
+; RV64IZCMP-SR-NEXT: lw s8, %lo(var+16)(s7)
; RV64IZCMP-SR-NEXT: mv a0, s2
; RV64IZCMP-SR-NEXT: call callee_void
-; RV64IZCMP-SR-NEXT: sw s8, 16(s7)
+; RV64IZCMP-SR-NEXT: sw s8, %lo(var+16)(s7)
; RV64IZCMP-SR-NEXT: sw s6, %lo(var+12)(s1)
; RV64IZCMP-SR-NEXT: sw s5, %lo(var+8)(s1)
; RV64IZCMP-SR-NEXT: sw s4, %lo(var+4)(s1)
@@ -897,11 +897,11 @@ define i32 @nocompress(i32 signext %size) {
; RV32I-NEXT: lw s4, %lo(var+4)(s2)
; RV32I-NEXT: lw s5, %lo(var+8)(s2)
; RV32I-NEXT: lw s6, %lo(var+12)(s2)
-; RV32I-NEXT: addi s7, s2, %lo(var)
-; RV32I-NEXT: lw s8, 16(s7)
+; RV32I-NEXT: lui s7, %hi(var+16)
+; RV32I-NEXT: lw s8, %lo(var+16)(s7)
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: call callee_void
-; RV32I-NEXT: sw s8, 16(s7)
+; RV32I-NEXT: sw s8, %lo(var+16)(s7)
; RV32I-NEXT: sw s6, %lo(var+12)(s2)
; RV32I-NEXT: sw s5, %lo(var+8)(s2)
; RV32I-NEXT: sw s4, %lo(var+4)(s2)
@@ -958,11 +958,11 @@ define i32 @nocompress(i32 signext %size) {
; RV64I-NEXT: lw s4, %lo(var+4)(s2)
; RV64I-NEXT: lw s5, %lo(var+8)(s2)
; RV64I-NEXT: lw s6, %lo(var+12)(s2)
-; RV64I-NEXT: addi s7, s2, %lo(var)
-; RV64I-NEXT: lw s8, 16(s7)
+; RV64I-NEXT: lui s7, %hi(var+16)
+; RV64I-NEXT: lw s8, %lo(var+16)(s7)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: call callee_void
-; RV64I-NEXT: sw s8, 16(s7)
+; RV64I-NEXT: sw s8, %lo(var+16)(s7)
; RV64I-NEXT: sw s6, %lo(var+12)(s2)
; RV64I-NEXT: sw s5, %lo(var+8)(s2)
; RV64I-NEXT: sw s4, %lo(var+4)(s2)
@@ -1112,7 +1112,8 @@ define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
; RV32IZCMP-NEXT: lw a7, %lo(var0+4)(a0)
; RV32IZCMP-NEXT: lw t0, %lo(var0+8)(a0)
; RV32IZCMP-NEXT: lw t1, %lo(var0+12)(a0)
-; RV32IZCMP-NEXT: addi a5, a0, %lo(var0)
+; RV32IZCMP-NEXT: lui a5, %hi(var0)
+; RV32IZCMP-NEXT: addi a5, a5, %lo(var0)
; RV32IZCMP-NEXT: lw t2, 16(a5)
; RV32IZCMP-NEXT: lw t3, 20(a5)
; RV32IZCMP-NEXT: lw t4, 24(a5)
@@ -1155,7 +1156,8 @@ define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
; RV64IZCMP-NEXT: lw a7, %lo(var0+4)(a0)
; RV64IZCMP-NEXT: lw t0, %lo(var0+8)(a0)
; RV64IZCMP-NEXT: lw t1, %lo(var0+12)(a0)
-; RV64IZCMP-NEXT: addi a5, a0, %lo(var0)
+; RV64IZCMP-NEXT: lui a5, %hi(var0)
+; RV64IZCMP-NEXT: addi a5, a5, %lo(var0)
; RV64IZCMP-NEXT: lw t2, 16(a5)
; RV64IZCMP-NEXT: lw t3, 20(a5)
; RV64IZCMP-NEXT: lw t4, 24(a5)
@@ -1198,7 +1200,8 @@ define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
; RV32IZCMP-SR-NEXT: lw a7, %lo(var0+4)(a0)
; RV32IZCMP-SR-NEXT: lw t0, %lo(var0+8)(a0)
; RV32IZCMP-SR-NEXT: lw t1, %lo(var0+12)(a0)
-; RV32IZCMP-SR-NEXT: addi a5, a0, %lo(var0)
+; RV32IZCMP-SR-NEXT: lui a5, %hi(var0)
+; RV32IZCMP-SR-NEXT: addi a5, a5, %lo(var0)
; RV32IZCMP-SR-NEXT: lw t2, 16(a5)
; RV32IZCMP-SR-NEXT: lw t3, 20(a5)
; RV32IZCMP-SR-NEXT: lw t4, 24(a5)
@@ -1241,7 +1244,8 @@ define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
; RV64IZCMP-SR-NEXT: lw a7, %lo(var0+4)(a0)
; RV64IZCMP-SR-NEXT: lw t0, %lo(var0+8)(a0)
; RV64IZCMP-SR-NEXT: lw t1, %lo(var0+12)(a0)
-; RV64IZCMP-SR-NEXT: addi a5, a0, %lo(var0)
+; RV64IZCMP-SR-NEXT: lui a5, %hi(var0)
+; RV64IZCMP-SR-NEXT: addi a5, a5, %lo(var0)
; RV64IZCMP-SR-NEXT: lw t2, 16(a5)
; RV64IZCMP-SR-NEXT: lw t3, 20(a5)
; RV64IZCMP-SR-NEXT: lw t4, 24(a5)
@@ -1289,7 +1293,8 @@ define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
; RV32I-NEXT: lw a2, %lo(var0+4)(a0)
; RV32I-NEXT: lw a3, %lo(var0+8)(a0)
; RV32I-NEXT: lw a4, %lo(var0+12)(a0)
-; RV32I-NEXT: addi a5, a0, %lo(var0)
+; RV32I-NEXT: lui a5, %hi(var0)
+; RV32I-NEXT: addi a5, a5, %lo(var0)
; RV32I-NEXT: lw a6, 16(a5)
; RV32I-NEXT: lw a7, 20(a5)
; RV32I-NEXT: lw t0, 24(a5)
@@ -1343,7 +1348,8 @@ define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
; RV64I-NEXT: lw a2, %lo(var0+4)(a0)
; RV64I-NEXT: lw a3, %lo(var0+8)(a0)
; RV64I-NEXT: lw a4, %lo(var0+12)(a0)
-; RV64I-NEXT: addi a5, a0, %lo(var0)
+; RV64I-NEXT: lui a5, %hi(var0)
+; RV64I-NEXT: addi a5, a5, %lo(var0)
; RV64I-NEXT: lw a6, 16(a5)
; RV64I-NEXT: lw a7, 20(a5)
; RV64I-NEXT: lw t0, 24(a5)
@@ -1813,16 +1819,17 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV32IZCMP-NEXT: sw t4, 44(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw t5, 40(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw t6, 36(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: lui a6, %hi(var_test_irq)
-; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq)(a6)
+; RV32IZCMP-NEXT: lui a7, %hi(var_test_irq)
+; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq)(a7)
; RV32IZCMP-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(a6)
+; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(a7)
; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(a6)
+; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(a7)
; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(a6)
+; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(a7)
; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: addi a5, a6, %lo(var_test_irq)
+; RV32IZCMP-NEXT: lui a5, %hi(var_test_irq)
+; RV32IZCMP-NEXT: addi a5, a5, %lo(var_test_irq)
; RV32IZCMP-NEXT: lw a0, 16(a5)
; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 20(a5)
@@ -1847,7 +1854,7 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV32IZCMP-NEXT: lw t1, 92(a5)
; RV32IZCMP-NEXT: lw t0, 96(a5)
; RV32IZCMP-NEXT: lw s0, 100(a5)
-; RV32IZCMP-NEXT: lw a7, 104(a5)
+; RV32IZCMP-NEXT: lw a6, 104(a5)
; RV32IZCMP-NEXT: lw a4, 108(a5)
; RV32IZCMP-NEXT: lw a0, 124(a5)
; RV32IZCMP-NEXT: lw a1, 120(a5)
@@ -1858,7 +1865,7 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV32IZCMP-NEXT: sw a2, 116(a5)
; RV32IZCMP-NEXT: sw a3, 112(a5)
; RV32IZCMP-NEXT: sw a4, 108(a5)
-; RV32IZCMP-NEXT: sw a7, 104(a5)
+; RV32IZCMP-NEXT: sw a6, 104(a5)
; RV32IZCMP-NEXT: sw s0, 100(a5)
; RV32IZCMP-NEXT: sw t0, 96(a5)
; RV32IZCMP-NEXT: sw t1, 92(a5)
@@ -1884,13 +1891,13 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: sw a0, 16(a5)
; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(a6)
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(a7)
; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(a6)
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(a7)
; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(a6)
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(a7)
; RV32IZCMP-NEXT: lw a0, 32(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq)(a6)
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq)(a7)
; RV32IZCMP-NEXT: lw t0, 92(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw t1, 88(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw t2, 84(sp) # 4-byte Folded Reload
@@ -1929,16 +1936,17 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV64IZCMP-NEXT: sd t4, 72(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd t5, 64(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd t6, 56(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lui a6, %hi(var_test_irq)
-; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq)(a6)
+; RV64IZCMP-NEXT: lui a7, %hi(var_test_irq)
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq)(a7)
; RV64IZCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(a6)
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(a7)
; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(a6)
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(a7)
; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(a6)
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(a7)
; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: addi a5, a6, %lo(var_test_irq)
+; RV64IZCMP-NEXT: lui a5, %hi(var_test_irq)
+; RV64IZCMP-NEXT: addi a5, a5, %lo(var_test_irq)
; RV64IZCMP-NEXT: lw a0, 16(a5)
; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: lw a0, 20(a5)
@@ -1963,7 +1971,7 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV64IZCMP-NEXT: lw t1, 92(a5)
; RV64IZCMP-NEXT: lw t0, 96(a5)
; RV64IZCMP-NEXT: lw s0, 100(a5)
-; RV64IZCMP-NEXT: lw a7, 104(a5)
+; RV64IZCMP-NEXT: lw a6, 104(a5)
; RV64IZCMP-NEXT: lw a4, 108(a5)
; RV64IZCMP-NEXT: lw a0, 124(a5)
; RV64IZCMP-NEXT: lw a1, 120(a5)
@@ -1974,7 +1982,7 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV64IZCMP-NEXT: sw a2, 116(a5)
; RV64IZCMP-NEXT: sw a3, 112(a5)
; RV64IZCMP-NEXT: sw a4, 108(a5)
-; RV64IZCMP-NEXT: sw a7, 104(a5)
+; RV64IZCMP-NEXT: sw a6, 104(a5)
; RV64IZCMP-NEXT: sw s0, 100(a5)
; RV64IZCMP-NEXT: sw t0, 96(a5)
; RV64IZCMP-NEXT: sw t1, 92(a5)
@@ -2000,13 +2008,13 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 16(a5)
; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(a6)
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(a7)
; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(a6)
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(a7)
; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(a6)
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(a7)
; RV64IZCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq)(a6)
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq)(a7)
; RV64IZCMP-NEXT: ld t0, 168(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld t1, 160(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld t2, 152(sp) # 8-byte Folded Reload
@@ -2045,16 +2053,17 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV32IZCMP-SR-NEXT: sw t4, 44(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw t5, 40(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw t6, 36(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: lui a6, %hi(var_test_irq)
-; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(a6)
+; RV32IZCMP-SR-NEXT: lui a7, %hi(var_test_irq)
+; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(a7)
; RV32IZCMP-SR-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(a6)
+; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(a7)
; RV32IZCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(a6)
+; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(a7)
; RV32IZCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(a6)
+; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(a7)
; RV32IZCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: addi a5, a6, %lo(var_test_irq)
+; RV32IZCMP-SR-NEXT: lui a5, %hi(var_test_irq)
+; RV32IZCMP-SR-NEXT: addi a5, a5, %lo(var_test_irq)
; RV32IZCMP-SR-NEXT: lw a0, 16(a5)
; RV32IZCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: lw a0, 20(a5)
@@ -2079,7 +2088,7 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV32IZCMP-SR-NEXT: lw t1, 92(a5)
; RV32IZCMP-SR-NEXT: lw t0, 96(a5)
; RV32IZCMP-SR-NEXT: lw s0, 100(a5)
-; RV32IZCMP-SR-NEXT: lw a7, 104(a5)
+; RV32IZCMP-SR-NEXT: lw a6, 104(a5)
; RV32IZCMP-SR-NEXT: lw a4, 108(a5)
; RV32IZCMP-SR-NEXT: lw a0, 124(a5)
; RV32IZCMP-SR-NEXT: lw a1, 120(a5)
@@ -2090,7 +2099,7 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV32IZCMP-SR-NEXT: sw a2, 116(a5)
; RV32IZCMP-SR-NEXT: sw a3, 112(a5)
; RV32IZCMP-SR-NEXT: sw a4, 108(a5)
-; RV32IZCMP-SR-NEXT: sw a7, 104(a5)
+; RV32IZCMP-SR-NEXT: sw a6, 104(a5)
; RV32IZCMP-SR-NEXT: sw s0, 100(a5)
; RV32IZCMP-SR-NEXT: sw t0, 96(a5)
; RV32IZCMP-SR-NEXT: sw t1, 92(a5)
@@ -2116,13 +2125,13 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV32IZCMP-SR-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: sw a0, 16(a5)
; RV32IZCMP-SR-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(a6)
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(a7)
; RV32IZCMP-SR-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(a6)
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(a7)
; RV32IZCMP-SR-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(a6)
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(a7)
; RV32IZCMP-SR-NEXT: lw a0, 32(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(a6)
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(a7)
; RV32IZCMP-SR-NEXT: lw t0, 92(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw t1, 88(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw t2, 84(sp) # 4-byte Folded Reload
@@ -2161,16 +2170,17 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV64IZCMP-SR-NEXT: sd t4, 72(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd t5, 64(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd t6, 56(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: lui a6, %hi(var_test_irq)
-; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(a6)
+; RV64IZCMP-SR-NEXT: lui a7, %hi(var_test_irq)
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(a7)
; RV64IZCMP-SR-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(a6)
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(a7)
; RV64IZCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(a6)
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(a7)
; RV64IZCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(a6)
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(a7)
; RV64IZCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: addi a5, a6, %lo(var_test_irq)
+; RV64IZCMP-SR-NEXT: lui a5, %hi(var_test_irq)
+; RV64IZCMP-SR-NEXT: addi a5, a5, %lo(var_test_irq)
; RV64IZCMP-SR-NEXT: lw a0, 16(a5)
; RV64IZCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: lw a0, 20(a5)
@@ -2195,7 +2205,7 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV64IZCMP-SR-NEXT: lw t1, 92(a5)
; RV64IZCMP-SR-NEXT: lw t0, 96(a5)
; RV64IZCMP-SR-NEXT: lw s0, 100(a5)
-; RV64IZCMP-SR-NEXT: lw a7, 104(a5)
+; RV64IZCMP-SR-NEXT: lw a6, 104(a5)
; RV64IZCMP-SR-NEXT: lw a4, 108(a5)
; RV64IZCMP-SR-NEXT: lw a0, 124(a5)
; RV64IZCMP-SR-NEXT: lw a1, 120(a5)
@@ -2206,7 +2216,7 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV64IZCMP-SR-NEXT: sw a2, 116(a5)
; RV64IZCMP-SR-NEXT: sw a3, 112(a5)
; RV64IZCMP-SR-NEXT: sw a4, 108(a5)
-; RV64IZCMP-SR-NEXT: sw a7, 104(a5)
+; RV64IZCMP-SR-NEXT: sw a6, 104(a5)
; RV64IZCMP-SR-NEXT: sw s0, 100(a5)
; RV64IZCMP-SR-NEXT: sw t0, 96(a5)
; RV64IZCMP-SR-NEXT: sw t1, 92(a5)
@@ -2232,13 +2242,13 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV64IZCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: sw a0, 16(a5)
; RV64IZCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(a6)
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(a7)
; RV64IZCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(a6)
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(a7)
; RV64IZCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(a6)
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(a7)
; RV64IZCMP-SR-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(a6)
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(a7)
; RV64IZCMP-SR-NEXT: ld t0, 168(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld t1, 160(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld t2, 152(sp) # 8-byte Folded Reload
@@ -2289,16 +2299,17 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV32I-NEXT: sw t4, 40(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t5, 36(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t6, 32(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lui a6, %hi(var_test_irq)
-; RV32I-NEXT: lw a0, %lo(var_test_irq)(a6)
+; RV32I-NEXT: lui a7, %hi(var_test_irq)
+; RV32I-NEXT: lw a0, %lo(var_test_irq)(a7)
; RV32I-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lw a0, %lo(var_test_irq+4)(a6)
+; RV32I-NEXT: lw a0, %lo(var_test_irq+4)(a7)
; RV32I-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lw a0, %lo(var_test_irq+8)(a6)
+; RV32I-NEXT: lw a0, %lo(var_test_irq+8)(a7)
; RV32I-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lw a0, %lo(var_test_irq+12)(a6)
+; RV32I-NEXT: lw a0, %lo(var_test_irq+12)(a7)
; RV32I-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: addi a5, a6, %lo(var_test_irq)
+; RV32I-NEXT: lui a5, %hi(var_test_irq)
+; RV32I-NEXT: addi a5, a5, %lo(var_test_irq)
; RV32I-NEXT: lw a0, 16(a5)
; RV32I-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 20(a5)
@@ -2323,7 +2334,7 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV32I-NEXT: lw s10, 92(a5)
; RV32I-NEXT: lw s11, 96(a5)
; RV32I-NEXT: lw ra, 100(a5)
-; RV32I-NEXT: lw a7, 104(a5)
+; RV32I-NEXT: lw a6, 104(a5)
; RV32I-NEXT: lw a4, 108(a5)
; RV32I-NEXT: lw a0, 124(a5)
; RV32I-NEXT: lw a1, 120(a5)
@@ -2334,7 +2345,7 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV32I-NEXT: sw a2, 116(a5)
; RV32I-NEXT: sw a3, 112(a5)
; RV32I-NEXT: sw a4, 108(a5)
-; RV32I-NEXT: sw a7, 104(a5)
+; RV32I-NEXT: sw a6, 104(a5)
; RV32I-NEXT: sw ra, 100(a5)
; RV32I-NEXT: sw s11, 96(a5)
; RV32I-NEXT: sw s10, 92(a5)
@@ -2360,13 +2371,13 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV32I-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: sw a0, 16(a5)
; RV32I-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a0, %lo(var_test_irq+12)(a6)
+; RV32I-NEXT: sw a0, %lo(var_test_irq+12)(a7)
; RV32I-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a0, %lo(var_test_irq+8)(a6)
+; RV32I-NEXT: sw a0, %lo(var_test_irq+8)(a7)
; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a0, %lo(var_test_irq+4)(a6)
+; RV32I-NEXT: sw a0, %lo(var_test_irq+4)(a7)
; RV32I-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a0, %lo(var_test_irq)(a6)
+; RV32I-NEXT: sw a0, %lo(var_test_irq)(a7)
; RV32I-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw t0, 136(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw t1, 132(sp) # 4-byte Folded Reload
@@ -2429,16 +2440,17 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV64I-NEXT: sd t4, 64(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t5, 56(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t6, 48(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lui a6, %hi(var_test_irq)
-; RV64I-NEXT: lw a0, %lo(var_test_irq)(a6)
+; RV64I-NEXT: lui a7, %hi(var_test_irq)
+; RV64I-NEXT: lw a0, %lo(var_test_irq)(a7)
; RV64I-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lw a0, %lo(var_test_irq+4)(a6)
+; RV64I-NEXT: lw a0, %lo(var_test_irq+4)(a7)
; RV64I-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lw a0, %lo(var_test_irq+8)(a6)
+; RV64I-NEXT: lw a0, %lo(var_test_irq+8)(a7)
; RV64I-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lw a0, %lo(var_test_irq+12)(a6)
+; RV64I-NEXT: lw a0, %lo(var_test_irq+12)(a7)
; RV64I-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
-; RV64I-NEXT: addi a5, a6, %lo(var_test_irq)
+; RV64I-NEXT: lui a5, %hi(var_test_irq)
+; RV64I-NEXT: addi a5, a5, %lo(var_test_irq)
; RV64I-NEXT: lw a0, 16(a5)
; RV64I-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 20(a5)
@@ -2463,7 +2475,7 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV64I-NEXT: lw s10, 92(a5)
; RV64I-NEXT: lw s11, 96(a5)
; RV64I-NEXT: lw ra, 100(a5)
-; RV64I-NEXT: lw a7, 104(a5)
+; RV64I-NEXT: lw a6, 104(a5)
; RV64I-NEXT: lw a4, 108(a5)
; RV64I-NEXT: lw a0, 124(a5)
; RV64I-NEXT: lw a1, 120(a5)
@@ -2474,7 +2486,7 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV64I-NEXT: sw a2, 116(a5)
; RV64I-NEXT: sw a3, 112(a5)
; RV64I-NEXT: sw a4, 108(a5)
-; RV64I-NEXT: sw a7, 104(a5)
+; RV64I-NEXT: sw a6, 104(a5)
; RV64I-NEXT: sw ra, 100(a5)
; RV64I-NEXT: sw s11, 96(a5)
; RV64I-NEXT: sw s10, 92(a5)
@@ -2500,13 +2512,13 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV64I-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: sw a0, 16(a5)
; RV64I-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sw a0, %lo(var_test_irq+12)(a6)
+; RV64I-NEXT: sw a0, %lo(var_test_irq+12)(a7)
; RV64I-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sw a0, %lo(var_test_irq+8)(a6)
+; RV64I-NEXT: sw a0, %lo(var_test_irq+8)(a7)
; RV64I-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sw a0, %lo(var_test_irq+4)(a6)
+; RV64I-NEXT: sw a0, %lo(var_test_irq+4)(a7)
; RV64I-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sw a0, %lo(var_test_irq)(a6)
+; RV64I-NEXT: sw a0, %lo(var_test_irq)(a7)
; RV64I-NEXT: ld ra, 264(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld t0, 256(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld t1, 248(sp) # 8-byte Folded Reload
@@ -2546,16 +2558,17 @@ define void @callee_no_irq() nounwind{
; RV32IZCMP-LABEL: callee_no_irq:
; RV32IZCMP: # %bb.0:
; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -96
-; RV32IZCMP-NEXT: lui a6, %hi(var_test_irq)
-; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq)(a6)
+; RV32IZCMP-NEXT: lui a7, %hi(var_test_irq)
+; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq)(a7)
; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(a6)
+; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(a7)
; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(a6)
+; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(a7)
; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(a6)
+; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(a7)
; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: addi a5, a6, %lo(var_test_irq)
+; RV32IZCMP-NEXT: lui a5, %hi(var_test_irq)
+; RV32IZCMP-NEXT: addi a5, a5, %lo(var_test_irq)
; RV32IZCMP-NEXT: lw a0, 16(a5)
; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 20(a5)
@@ -2580,7 +2593,7 @@ define void @callee_no_irq() nounwind{
; RV32IZCMP-NEXT: lw t1, 92(a5)
; RV32IZCMP-NEXT: lw t0, 96(a5)
; RV32IZCMP-NEXT: lw s0, 100(a5)
-; RV32IZCMP-NEXT: lw a7, 104(a5)
+; RV32IZCMP-NEXT: lw a6, 104(a5)
; RV32IZCMP-NEXT: lw a4, 108(a5)
; RV32IZCMP-NEXT: lw a0, 124(a5)
; RV32IZCMP-NEXT: lw a1, 120(a5)
@@ -2591,7 +2604,7 @@ define void @callee_no_irq() nounwind{
; RV32IZCMP-NEXT: sw a2, 116(a5)
; RV32IZCMP-NEXT: sw a3, 112(a5)
; RV32IZCMP-NEXT: sw a4, 108(a5)
-; RV32IZCMP-NEXT: sw a7, 104(a5)
+; RV32IZCMP-NEXT: sw a6, 104(a5)
; RV32IZCMP-NEXT: sw s0, 100(a5)
; RV32IZCMP-NEXT: sw t0, 96(a5)
; RV32IZCMP-NEXT: sw t1, 92(a5)
@@ -2617,28 +2630,29 @@ define void @callee_no_irq() nounwind{
; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: sw a0, 16(a5)
; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(a6)
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(a7)
; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(a6)
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(a7)
; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(a6)
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(a7)
; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq)(a6)
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq)(a7)
; RV32IZCMP-NEXT: cm.popret {ra, s0-s11}, 96
;
; RV64IZCMP-LABEL: callee_no_irq:
; RV64IZCMP: # %bb.0:
; RV64IZCMP-NEXT: cm.push {ra, s0-s11}, -160
-; RV64IZCMP-NEXT: lui a6, %hi(var_test_irq)
-; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq)(a6)
+; RV64IZCMP-NEXT: lui a7, %hi(var_test_irq)
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq)(a7)
; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(a6)
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(a7)
; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(a6)
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(a7)
; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(a6)
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(a7)
; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: addi a5, a6, %lo(var_test_irq)
+; RV64IZCMP-NEXT: lui a5, %hi(var_test_irq)
+; RV64IZCMP-NEXT: addi a5, a5, %lo(var_test_irq)
; RV64IZCMP-NEXT: lw a0, 16(a5)
; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: lw a0, 20(a5)
@@ -2663,7 +2677,7 @@ define void @callee_no_irq() nounwind{
; RV64IZCMP-NEXT: lw t1, 92(a5)
; RV64IZCMP-NEXT: lw t0, 96(a5)
; RV64IZCMP-NEXT: lw s0, 100(a5)
-; RV64IZCMP-NEXT: lw a7, 104(a5)
+; RV64IZCMP-NEXT: lw a6, 104(a5)
; RV64IZCMP-NEXT: lw a4, 108(a5)
; RV64IZCMP-NEXT: lw a0, 124(a5)
; RV64IZCMP-NEXT: lw a1, 120(a5)
@@ -2674,7 +2688,7 @@ define void @callee_no_irq() nounwind{
; RV64IZCMP-NEXT: sw a2, 116(a5)
; RV64IZCMP-NEXT: sw a3, 112(a5)
; RV64IZCMP-NEXT: sw a4, 108(a5)
-; RV64IZCMP-NEXT: sw a7, 104(a5)
+; RV64IZCMP-NEXT: sw a6, 104(a5)
; RV64IZCMP-NEXT: sw s0, 100(a5)
; RV64IZCMP-NEXT: sw t0, 96(a5)
; RV64IZCMP-NEXT: sw t1, 92(a5)
@@ -2700,28 +2714,29 @@ define void @callee_no_irq() nounwind{
; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 16(a5)
; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(a6)
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(a7)
; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(a6)
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(a7)
; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(a6)
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(a7)
; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq)(a6)
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq)(a7)
; RV64IZCMP-NEXT: cm.popret {ra, s0-s11}, 160
;
; RV32IZCMP-SR-LABEL: callee_no_irq:
; RV32IZCMP-SR: # %bb.0:
; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -96
-; RV32IZCMP-SR-NEXT: lui a6, %hi(var_test_irq)
-; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(a6)
+; RV32IZCMP-SR-NEXT: lui a7, %hi(var_test_irq)
+; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(a7)
; RV32IZCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(a6)
+; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(a7)
; RV32IZCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(a6)
+; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(a7)
; RV32IZCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(a6)
+; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(a7)
; RV32IZCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: addi a5, a6, %lo(var_test_irq)
+; RV32IZCMP-SR-NEXT: lui a5, %hi(var_test_irq)
+; RV32IZCMP-SR-NEXT: addi a5, a5, %lo(var_test_irq)
; RV32IZCMP-SR-NEXT: lw a0, 16(a5)
; RV32IZCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: lw a0, 20(a5)
@@ -2746,7 +2761,7 @@ define void @callee_no_irq() nounwind{
; RV32IZCMP-SR-NEXT: lw t1, 92(a5)
; RV32IZCMP-SR-NEXT: lw t0, 96(a5)
; RV32IZCMP-SR-NEXT: lw s0, 100(a5)
-; RV32IZCMP-SR-NEXT: lw a7, 104(a5)
+; RV32IZCMP-SR-NEXT: lw a6, 104(a5)
; RV32IZCMP-SR-NEXT: lw a4, 108(a5)
; RV32IZCMP-SR-NEXT: lw a0, 124(a5)
; RV32IZCMP-SR-NEXT: lw a1, 120(a5)
@@ -2757,7 +2772,7 @@ define void @callee_no_irq() nounwind{
; RV32IZCMP-SR-NEXT: sw a2, 116(a5)
; RV32IZCMP-SR-NEXT: sw a3, 112(a5)
; RV32IZCMP-SR-NEXT: sw a4, 108(a5)
-; RV32IZCMP-SR-NEXT: sw a7, 104(a5)
+; RV32IZCMP-SR-NEXT: sw a6, 104(a5)
; RV32IZCMP-SR-NEXT: sw s0, 100(a5)
; RV32IZCMP-SR-NEXT: sw t0, 96(a5)
; RV32IZCMP-SR-NEXT: sw t1, 92(a5)
@@ -2783,28 +2798,29 @@ define void @callee_no_irq() nounwind{
; RV32IZCMP-SR-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: sw a0, 16(a5)
; RV32IZCMP-SR-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(a6)
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(a7)
; RV32IZCMP-SR-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(a6)
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(a7)
; RV32IZCMP-SR-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(a6)
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(a7)
; RV32IZCMP-SR-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(a6)
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(a7)
; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s11}, 96
;
; RV64IZCMP-SR-LABEL: callee_no_irq:
; RV64IZCMP-SR: # %bb.0:
; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -160
-; RV64IZCMP-SR-NEXT: lui a6, %hi(var_test_irq)
-; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(a6)
+; RV64IZCMP-SR-NEXT: lui a7, %hi(var_test_irq)
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(a7)
; RV64IZCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(a6)
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(a7)
; RV64IZCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(a6)
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(a7)
; RV64IZCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(a6)
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(a7)
; RV64IZCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: addi a5, a6, %lo(var_test_irq)
+; RV64IZCMP-SR-NEXT: lui a5, %hi(var_test_irq)
+; RV64IZCMP-SR-NEXT: addi a5, a5, %lo(var_test_irq)
; RV64IZCMP-SR-NEXT: lw a0, 16(a5)
; RV64IZCMP-SR-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: lw a0, 20(a5)
@@ -2829,7 +2845,7 @@ define void @callee_no_irq() nounwind{
; RV64IZCMP-SR-NEXT: lw t1, 92(a5)
; RV64IZCMP-SR-NEXT: lw t0, 96(a5)
; RV64IZCMP-SR-NEXT: lw s0, 100(a5)
-; RV64IZCMP-SR-NEXT: lw a7, 104(a5)
+; RV64IZCMP-SR-NEXT: lw a6, 104(a5)
; RV64IZCMP-SR-NEXT: lw a4, 108(a5)
; RV64IZCMP-SR-NEXT: lw a0, 124(a5)
; RV64IZCMP-SR-NEXT: lw a1, 120(a5)
@@ -2840,7 +2856,7 @@ define void @callee_no_irq() nounwind{
; RV64IZCMP-SR-NEXT: sw a2, 116(a5)
; RV64IZCMP-SR-NEXT: sw a3, 112(a5)
; RV64IZCMP-SR-NEXT: sw a4, 108(a5)
-; RV64IZCMP-SR-NEXT: sw a7, 104(a5)
+; RV64IZCMP-SR-NEXT: sw a6, 104(a5)
; RV64IZCMP-SR-NEXT: sw s0, 100(a5)
; RV64IZCMP-SR-NEXT: sw t0, 96(a5)
; RV64IZCMP-SR-NEXT: sw t1, 92(a5)
@@ -2866,13 +2882,13 @@ define void @callee_no_irq() nounwind{
; RV64IZCMP-SR-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: sw a0, 16(a5)
; RV64IZCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(a6)
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(a7)
; RV64IZCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(a6)
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(a7)
; RV64IZCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(a6)
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(a7)
; RV64IZCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(a6)
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(a7)
; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s11}, 160
;
; RV32I-LABEL: callee_no_irq:
@@ -2891,16 +2907,17 @@ define void @callee_no_irq() nounwind{
; RV32I-NEXT: sw s9, 36(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s10, 32(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s11, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lui a6, %hi(var_test_irq)
-; RV32I-NEXT: lw a0, %lo(var_test_irq)(a6)
+; RV32I-NEXT: lui a7, %hi(var_test_irq)
+; RV32I-NEXT: lw a0, %lo(var_test_irq)(a7)
; RV32I-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lw a0, %lo(var_test_irq+4)(a6)
+; RV32I-NEXT: lw a0, %lo(var_test_irq+4)(a7)
; RV32I-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lw a0, %lo(var_test_irq+8)(a6)
+; RV32I-NEXT: lw a0, %lo(var_test_irq+8)(a7)
; RV32I-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lw a0, %lo(var_test_irq+12)(a6)
+; RV32I-NEXT: lw a0, %lo(var_test_irq+12)(a7)
; RV32I-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: addi a5, a6, %lo(var_test_irq)
+; RV32I-NEXT: lui a5, %hi(var_test_irq)
+; RV32I-NEXT: addi a5, a5, %lo(var_test_irq)
; RV32I-NEXT: lw a0, 16(a5)
; RV32I-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 20(a5)
@@ -2925,7 +2942,7 @@ define void @callee_no_irq() nounwind{
; RV32I-NEXT: lw s10, 92(a5)
; RV32I-NEXT: lw s11, 96(a5)
; RV32I-NEXT: lw ra, 100(a5)
-; RV32I-NEXT: lw a7, 104(a5)
+; RV32I-NEXT: lw a6, 104(a5)
; RV32I-NEXT: lw a4, 108(a5)
; RV32I-NEXT: lw a0, 124(a5)
; RV32I-NEXT: lw a1, 120(a5)
@@ -2936,7 +2953,7 @@ define void @callee_no_irq() nounwind{
; RV32I-NEXT: sw a2, 116(a5)
; RV32I-NEXT: sw a3, 112(a5)
; RV32I-NEXT: sw a4, 108(a5)
-; RV32I-NEXT: sw a7, 104(a5)
+; RV32I-NEXT: sw a6, 104(a5)
; RV32I-NEXT: sw ra, 100(a5)
; RV32I-NEXT: sw s11, 96(a5)
; RV32I-NEXT: sw s10, 92(a5)
@@ -2962,13 +2979,13 @@ define void @callee_no_irq() nounwind{
; RV32I-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: sw a0, 16(a5)
; RV32I-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a0, %lo(var_test_irq+12)(a6)
+; RV32I-NEXT: sw a0, %lo(var_test_irq+12)(a7)
; RV32I-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a0, %lo(var_test_irq+8)(a6)
+; RV32I-NEXT: sw a0, %lo(var_test_irq+8)(a7)
; RV32I-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a0, %lo(var_test_irq+4)(a6)
+; RV32I-NEXT: sw a0, %lo(var_test_irq+4)(a7)
; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a0, %lo(var_test_irq)(a6)
+; RV32I-NEXT: sw a0, %lo(var_test_irq)(a7)
; RV32I-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 68(sp) # 4-byte Folded Reload
@@ -3001,16 +3018,17 @@ define void @callee_no_irq() nounwind{
; RV64I-NEXT: sd s9, 72(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s10, 64(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s11, 56(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lui a6, %hi(var_test_irq)
-; RV64I-NEXT: lw a0, %lo(var_test_irq)(a6)
+; RV64I-NEXT: lui a7, %hi(var_test_irq)
+; RV64I-NEXT: lw a0, %lo(var_test_irq)(a7)
; RV64I-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lw a0, %lo(var_test_irq+4)(a6)
+; RV64I-NEXT: lw a0, %lo(var_test_irq+4)(a7)
; RV64I-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lw a0, %lo(var_test_irq+8)(a6)
+; RV64I-NEXT: lw a0, %lo(var_test_irq+8)(a7)
; RV64I-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lw a0, %lo(var_test_irq+12)(a6)
+; RV64I-NEXT: lw a0, %lo(var_test_irq+12)(a7)
; RV64I-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: addi a5, a6, %lo(var_test_irq)
+; RV64I-NEXT: lui a5, %hi(var_test_irq)
+; RV64I-NEXT: addi a5, a5, %lo(var_test_irq)
; RV64I-NEXT: lw a0, 16(a5)
; RV64I-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 20(a5)
@@ -3035,7 +3053,7 @@ define void @callee_no_irq() nounwind{
; RV64I-NEXT: lw s10, 92(a5)
; RV64I-NEXT: lw s11, 96(a5)
; RV64I-NEXT: lw ra, 100(a5)
-; RV64I-NEXT: lw a7, 104(a5)
+; RV64I-NEXT: lw a6, 104(a5)
; RV64I-NEXT: lw a4, 108(a5)
; RV64I-NEXT: lw a0, 124(a5)
; RV64I-NEXT: lw a1, 120(a5)
@@ -3046,7 +3064,7 @@ define void @callee_no_irq() nounwind{
; RV64I-NEXT: sw a2, 116(a5)
; RV64I-NEXT: sw a3, 112(a5)
; RV64I-NEXT: sw a4, 108(a5)
-; RV64I-NEXT: sw a7, 104(a5)
+; RV64I-NEXT: sw a6, 104(a5)
; RV64I-NEXT: sw ra, 100(a5)
; RV64I-NEXT: sw s11, 96(a5)
; RV64I-NEXT: sw s10, 92(a5)
@@ -3072,13 +3090,13 @@ define void @callee_no_irq() nounwind{
; RV64I-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: sw a0, 16(a5)
; RV64I-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sw a0, %lo(var_test_irq+12)(a6)
+; RV64I-NEXT: sw a0, %lo(var_test_irq+12)(a7)
; RV64I-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sw a0, %lo(var_test_irq+8)(a6)
+; RV64I-NEXT: sw a0, %lo(var_test_irq+8)(a7)
; RV64I-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sw a0, %lo(var_test_irq+4)(a6)
+; RV64I-NEXT: sw a0, %lo(var_test_irq+4)(a7)
; RV64I-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sw a0, %lo(var_test_irq)(a6)
+; RV64I-NEXT: sw a0, %lo(var_test_irq)(a7)
; RV64I-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 144(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 136(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll
index b45ab135fa1c7..197366e7e05fe 100644
--- a/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll
@@ -209,8 +209,8 @@ define i64 @cttz_i64(i64 %a) nounwind {
; RV32I-NEXT: mv a1, s3
; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: lui a0, %hi(.LCPI3_0)
-; RV32I-NEXT: addi s4, a0, %lo(.LCPI3_0)
+; RV32I-NEXT: lui s4, %hi(.LCPI3_0)
+; RV32I-NEXT: addi s4, s4, %lo(.LCPI3_0)
; RV32I-NEXT: neg a0, s2
; RV32I-NEXT: and a0, s2, a0
; RV32I-NEXT: mv a1, s3
diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
index 7e6c3f9c87d27..f25aa0de89da8 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -199,8 +199,8 @@ define i64 @cttz_i64(i64 %a) nounwind {
; RV32I-NEXT: mv a1, s3
; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: lui a0, %hi(.LCPI3_0)
-; RV32I-NEXT: addi s4, a0, %lo(.LCPI3_0)
+; RV32I-NEXT: lui s4, %hi(.LCPI3_0)
+; RV32I-NEXT: addi s4, s4, %lo(.LCPI3_0)
; RV32I-NEXT: neg a0, s2
; RV32I-NEXT: and a0, s2, a0
; RV32I-NEXT: mv a1, s3
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/mem64.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/mem64.ll
index de4c21f324688..22a25d6703ff5 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/mem64.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/mem64.ll
@@ -177,10 +177,10 @@ define dso_local i64 @ld_sd_global(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: lui a2, %hi(G)
; RV64I-NEXT: ld a1, %lo(G)(a2)
-; RV64I-NEXT: addi a3, a2, %lo(G)
+; RV64I-NEXT: lui a3, %hi(G+72)
; RV64I-NEXT: sd a0, %lo(G)(a2)
-; RV64I-NEXT: ld zero, 72(a3)
-; RV64I-NEXT: sd a0, 72(a3)
+; RV64I-NEXT: ld zero, %lo(G+72)(a3)
+; RV64I-NEXT: sd a0, %lo(G+72)(a3)
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
%1 = load volatile i64, ptr @G
diff --git a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
index 9cb3991f31f94..08b310213d16e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
@@ -126,28 +126,28 @@ define <64 x i1> @fv64(ptr %p, i64 %index, i64 %tc) {
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vid.v v8
; CHECK-NEXT: vsaddu.vx v8, v8, a1
-; CHECK-NEXT: vmsltu.vx v0, v8, a2
; CHECK-NEXT: lui a0, %hi(.LCPI9_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI9_0)
-; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v16, (a0)
+; CHECK-NEXT: vmsltu.vx v0, v8, a2
; CHECK-NEXT: lui a0, %hi(.LCPI9_1)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI9_1)
-; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsext.vf8 v24, v16
+; CHECK-NEXT: vsaddu.vx v16, v24, a1
+; CHECK-NEXT: vmsltu.vx v9, v16, a2
; CHECK-NEXT: vsext.vf8 v16, v8
; CHECK-NEXT: vsaddu.vx v16, v16, a1
-; CHECK-NEXT: vmsltu.vx v8, v16, a2
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vsaddu.vx v16, v16, a1
; CHECK-NEXT: lui a0, %hi(.LCPI9_2)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI9_2)
-; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmsltu.vx v10, v16, a2
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v0, v8, 2
+; CHECK-NEXT: vslideup.vi v0, v9, 2
; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
; CHECK-NEXT: vslideup.vi v0, v10, 4
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v9
+; CHECK-NEXT: vsext.vf8 v16, v8
; CHECK-NEXT: vsaddu.vx v8, v16, a1
; CHECK-NEXT: vmsltu.vx v16, v8, a2
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
@@ -169,13 +169,13 @@ define <128 x i1> @fv128(ptr %p, i64 %index, i64 %tc) {
; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: vsext.vf8 v16, v8
; CHECK-NEXT: vsaddu.vx v16, v16, a1
-; CHECK-NEXT: vmsltu.vx v10, v16, a2
+; CHECK-NEXT: vmsltu.vx v8, v16, a2
; CHECK-NEXT: vsext.vf8 v16, v9
; CHECK-NEXT: vsaddu.vx v16, v16, a1
-; CHECK-NEXT: vmsltu.vx v8, v16, a2
; CHECK-NEXT: lui a0, %hi(.LCPI10_2)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_2)
; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vmsltu.vx v10, v16, a2
; CHECK-NEXT: lui a0, %hi(.LCPI10_3)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_3)
; CHECK-NEXT: vle8.v v11, (a0)
@@ -187,10 +187,10 @@ define <128 x i1> @fv128(ptr %p, i64 %index, i64 %tc) {
; CHECK-NEXT: vmsltu.vx v11, v16, a2
; CHECK-NEXT: vid.v v16
; CHECK-NEXT: vsaddu.vx v16, v16, a1
-; CHECK-NEXT: vmsltu.vx v0, v16, a2
; CHECK-NEXT: lui a0, %hi(.LCPI10_4)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_4)
; CHECK-NEXT: vle8.v v12, (a0)
+; CHECK-NEXT: vmsltu.vx v0, v16, a2
; CHECK-NEXT: lui a0, %hi(.LCPI10_5)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_5)
; CHECK-NEXT: vle8.v v13, (a0)
@@ -201,27 +201,27 @@ define <128 x i1> @fv128(ptr %p, i64 %index, i64 %tc) {
; CHECK-NEXT: vsaddu.vx v16, v16, a1
; CHECK-NEXT: vmsltu.vx v13, v16, a2
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 2
+; CHECK-NEXT: vslideup.vi v10, v8, 2
; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 4
+; CHECK-NEXT: vslideup.vi v10, v9, 4
; CHECK-NEXT: lui a0, %hi(.LCPI10_6)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_6)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v11, 6
+; CHECK-NEXT: vslideup.vi v10, v11, 6
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
; CHECK-NEXT: vslideup.vi v0, v12, 2
; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
; CHECK-NEXT: vslideup.vi v0, v13, 4
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v9
+; CHECK-NEXT: vsext.vf8 v16, v8
; CHECK-NEXT: vsaddu.vx v16, v16, a1
-; CHECK-NEXT: vmsltu.vx v9, v16, a2
+; CHECK-NEXT: vmsltu.vx v8, v16, a2
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v0, v9, 6
+; CHECK-NEXT: vslideup.vi v0, v8, 6
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v0, v8, 8
+; CHECK-NEXT: vslideup.vi v0, v10, 8
; CHECK-NEXT: ret
%mask = call <128 x i1> @llvm.get.active.lane.mask.v128i1.i64(i64 %index, i64 %tc)
ret <128 x i1> %mask
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index 79c36a629465d..f4d7074c7f6b2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -3459,6 +3459,8 @@ define void @mulhu_v4i64(ptr %x) {
; RV64-NEXT: lui a1, %hi(.LCPI184_0)
; RV64-NEXT: addi a1, a1, %lo(.LCPI184_0)
; RV64-NEXT: vle64.v v10, (a1)
+; RV64-NEXT: vmulhu.vv v10, v8, v10
+; RV64-NEXT: vsub.vv v8, v8, v10
; RV64-NEXT: li a1, -1
; RV64-NEXT: slli a1, a1, 63
; RV64-NEXT: vmv.s.x v12, a1
@@ -3466,8 +3468,6 @@ define void @mulhu_v4i64(ptr %x) {
; RV64-NEXT: vsetivli zero, 3, e64, m2, tu, ma
; RV64-NEXT: vslideup.vi v14, v12, 2
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT: vmulhu.vv v10, v8, v10
-; RV64-NEXT: vsub.vv v8, v8, v10
; RV64-NEXT: vmulhu.vv v8, v8, v14
; RV64-NEXT: vadd.vv v8, v8, v10
; RV64-NEXT: lui a1, 12320
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
index 178a920169ad9..bc3e135a588a6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
@@ -159,17 +159,16 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: li a3, 82
+; RV32-NEXT: li a3, 80
; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd2, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 82 * vlenb
+; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd0, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 80 * vlenb
; RV32-NEXT: addi a3, a1, 256
; RV32-NEXT: li a2, 32
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vle32.v v16, (a3)
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 57
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 6
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
@@ -177,26 +176,26 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vslideup.vi v8, v16, 4
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: li a5, 41
+; RV32-NEXT: li a5, 40
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs4r.v v8, (a4) # Unknown-size Folded Spill
; RV32-NEXT: lui a4, 12
-; RV32-NEXT: vmv.s.x v1, a4
+; RV32-NEXT: vmv.s.x v0, a4
; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV32-NEXT: vslidedown.vi v16, v16, 16
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: slli a5, a4, 6
-; RV32-NEXT: add a4, a5, a4
+; RV32-NEXT: li a5, 56
+; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v3, v0
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; RV32-NEXT: vslideup.vi v8, v16, 10, v0.t
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: li a5, 45
+; RV32-NEXT: li a5, 44
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
@@ -206,8 +205,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
; RV32-NEXT: vle16.v v8, (a4)
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: slli a5, a4, 5
-; RV32-NEXT: add a4, a5, a4
+; RV32-NEXT: slli a4, a4, 5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs4r.v v8, (a4) # Unknown-size Folded Spill
@@ -216,21 +214,21 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: lui a5, 1
; RV32-NEXT: vle16.v v8, (a4)
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: li a6, 25
+; RV32-NEXT: li a6, 24
; RV32-NEXT: mul a4, a4, a6
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs4r.v v8, (a4) # Unknown-size Folded Spill
; RV32-NEXT: vle32.v v8, (a1)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a4, 73
+; RV32-NEXT: li a4, 72
; RV32-NEXT: mul a1, a1, a4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; RV32-NEXT: vle32.v v24, (a3)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 49
+; RV32-NEXT: li a3, 48
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
@@ -238,27 +236,26 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: addi a1, a5, -64
; RV32-NEXT: vmv.s.x v0, a1
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 37
+; RV32-NEXT: li a3, 36
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 5
-; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: slli a1, a1, 5
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl4r.v v4, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vrgatherei16.vv v16, v8, v4
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 25
+; RV32-NEXT: li a3, 24
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vrgatherei16.vv v16, v24, v8, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 45
+; RV32-NEXT: li a3, 44
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
@@ -266,259 +263,257 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetivli zero, 12, e32, m4, tu, ma
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 45
+; RV32-NEXT: li a3, 44
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 57
-; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: slli a1, a1, 6
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; RV32-NEXT: vslideup.vi v12, v8, 2
+; RV32-NEXT: vmv1r.v v8, v3
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 21
-; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs1r.v v1, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vs1r.v v3, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vmv1r.v v0, v3
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 6
-; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: li a3, 56
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vslideup.vi v12, v16, 8, v0.t
-; RV32-NEXT: vmv.v.v v20, v12
; RV32-NEXT: lui a1, %hi(.LCPI6_2)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_2)
; RV32-NEXT: lui a3, %hi(.LCPI6_3)
; RV32-NEXT: addi a3, a3, %lo(.LCPI6_3)
-; RV32-NEXT: lui a4, %hi(.LCPI6_4)
; RV32-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; RV32-NEXT: vle16.v v4, (a1)
-; RV32-NEXT: vle16.v v16, (a3)
-; RV32-NEXT: addi a1, a4, %lo(.LCPI6_4)
+; RV32-NEXT: vle16.v v0, (a1)
+; RV32-NEXT: vle16.v v4, (a3)
+; RV32-NEXT: lui a1, %hi(.LCPI6_4)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI6_4)
; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vle16.v v2, (a1)
+; RV32-NEXT: vle16.v v10, (a1)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 73
+; RV32-NEXT: li a3, 72
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
-; RV32-NEXT: vrgatherei16.vv v24, v8, v4
+; RV32-NEXT: vrgatherei16.vv v24, v16, v0
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 37
+; RV32-NEXT: li a3, 36
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 49
+; RV32-NEXT: li a3, 48
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v24, v8, v16, v0.t
+; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vrgatherei16.vv v24, v16, v4, v0.t
; RV32-NEXT: vsetivli zero, 12, e32, m4, tu, ma
-; RV32-NEXT: vmv.v.v v20, v24
+; RV32-NEXT: vmv.v.v v12, v24
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 37
+; RV32-NEXT: li a3, 36
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v20, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 57
-; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: slli a1, a1, 6
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
-; RV32-NEXT: vrgatherei16.vv v16, v24, v2
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vrgatherei16.vv v12, v24, v10
+; RV32-NEXT: vmv1r.v v0, v8
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 6
-; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: li a3, 56
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslideup.vi v16, v8, 6, v0.t
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslideup.vi v12, v24, 6, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 5
-; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: slli a1, a1, 5
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI6_5)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_5)
; RV32-NEXT: lui a3, %hi(.LCPI6_6)
; RV32-NEXT: addi a3, a3, %lo(.LCPI6_6)
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
-; RV32-NEXT: vle16.v v16, (a1)
-; RV32-NEXT: vle16.v v4, (a3)
-; RV32-NEXT: li a1, 960
-; RV32-NEXT: vmv.s.x v0, a1
+; RV32-NEXT: vle16.v v12, (a1)
+; RV32-NEXT: vle16.v v8, (a3)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 13
+; RV32-NEXT: li a3, 12
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: li a1, 960
+; RV32-NEXT: vmv.s.x v8, a1
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 73
+; RV32-NEXT: li a3, 72
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v24, v16
+; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vrgatherei16.vv v24, v0, v12
+; RV32-NEXT: vmv1r.v v3, v8
+; RV32-NEXT: vmv1r.v v0, v8
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 49
+; RV32-NEXT: li a3, 12
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v16, v4, v0.t
+; RV32-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vrgatherei16.vv v24, v16, v8, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 25
+; RV32-NEXT: li a3, 24
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI6_7)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_7)
; RV32-NEXT: lui a3, %hi(.LCPI6_8)
; RV32-NEXT: addi a3, a3, %lo(.LCPI6_8)
-; RV32-NEXT: lui a4, %hi(.LCPI6_9)
; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: addi a1, a4, %lo(.LCPI6_9)
+; RV32-NEXT: lui a1, %hi(.LCPI6_9)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI6_9)
; RV32-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; RV32-NEXT: vle16.v v24, (a3)
-; RV32-NEXT: vle16.v v28, (a1)
+; RV32-NEXT: vle16.v v4, (a3)
+; RV32-NEXT: vle16.v v12, (a1)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 57
-; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: slli a1, a1, 3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 6
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
-; RV32-NEXT: vrgatherei16.vv v4, v0, v8
+; RV32-NEXT: vrgatherei16.vv v12, v24, v8
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 21
-; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 6
-; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: li a3, 56
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslideup.vi v4, v8, 4, v0.t
+; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vmv4r.v v24, v16
+; RV32-NEXT: vslideup.vi v12, v16, 4, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 21
+; RV32-NEXT: li a3, 12
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v4, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 73
+; RV32-NEXT: li a3, 72
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
-; RV32-NEXT: vrgatherei16.vv v8, v0, v24
+; RV32-NEXT: vrgatherei16.vv v8, v16, v4
+; RV32-NEXT: vmv1r.v v0, v3
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 13
+; RV32-NEXT: li a3, 48
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl4r.v v28, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vrgatherei16.vv v8, v16, v28, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 13
-; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI6_10)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_10)
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
-; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: vle16.v v12, (a1)
; RV32-NEXT: lui a1, 15
; RV32-NEXT: vmv.s.x v3, a1
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 57
-; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: slli a1, a1, 6
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslideup.vi v12, v16, 6
+; RV32-NEXT: vslideup.vi v8, v16, 6
; RV32-NEXT: vmv1r.v v0, v3
+; RV32-NEXT: vrgatherei16.vv v8, v24, v12, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 6
-; RV32-NEXT: add a1, a3, a1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v12, v16, v8, v0.t
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 57
-; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: slli a1, a1, 2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI6_11)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_11)
; RV32-NEXT: lui a3, %hi(.LCPI6_12)
; RV32-NEXT: addi a3, a3, %lo(.LCPI6_12)
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: vle16.v v12, (a3)
+; RV32-NEXT: vle16.v v24, (a1)
+; RV32-NEXT: vle16.v v4, (a3)
; RV32-NEXT: li a1, 1008
; RV32-NEXT: vmv.s.x v0, a1
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 2
+; RV32-NEXT: slli a1, a1, 3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 73
+; RV32-NEXT: li a3, 72
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v24, v16, v8
+; RV32-NEXT: vrgatherei16.vv v8, v16, v24
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 49
+; RV32-NEXT: li a3, 48
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v24, v16, v12, v0.t
+; RV32-NEXT: vrgatherei16.vv v8, v16, v4, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 2
-; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: slli a1, a1, 6
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI6_13)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_13)
; RV32-NEXT: lui a3, %hi(.LCPI6_14)
; RV32-NEXT: addi a3, a3, %lo(.LCPI6_14)
-; RV32-NEXT: lui a4, %hi(.LCPI6_15)
; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; RV32-NEXT: vle16.v v20, (a1)
-; RV32-NEXT: addi a1, a4, %lo(.LCPI6_15)
+; RV32-NEXT: lui a1, %hi(.LCPI6_15)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI6_15)
; RV32-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; RV32-NEXT: vle16.v v24, (a3)
; RV32-NEXT: vle16.v v8, (a1)
@@ -526,27 +521,26 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill
; RV32-NEXT: vmv1r.v v0, v3
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 41
+; RV32-NEXT: li a3, 40
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl4r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 6
-; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: li a3, 56
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; RV32-NEXT: vrgatherei16.vv v16, v8, v20, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 5
-; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: slli a1, a1, 5
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl4r.v v20, (a1) # Unknown-size Folded Reload
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 25
+; RV32-NEXT: li a3, 24
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
@@ -554,7 +548,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
; RV32-NEXT: vmv.v.v v20, v8
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 73
+; RV32-NEXT: li a3, 72
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
@@ -562,12 +556,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
; RV32-NEXT: vrgatherei16.vv v8, v0, v24
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 2
+; RV32-NEXT: slli a1, a1, 3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 49
+; RV32-NEXT: li a2, 48
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
@@ -576,31 +570,28 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vl4r.v v4, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vrgatherei16.vv v8, v24, v4, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 21
-; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl4r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 13
+; RV32-NEXT: li a2, 12
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vl4r.v v24, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
; RV32-NEXT: vmv.v.v v24, v0
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 57
-; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: slli a1, a1, 6
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl4r.v v28, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 2
-; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: slli a1, a1, 2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vl4r.v v28, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vmv.v.v v28, v0
; RV32-NEXT: vmv.v.v v16, v8
; RV32-NEXT: addi a1, a0, 320
@@ -614,21 +605,21 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vse32.v v20, (a1)
; RV32-NEXT: addi a1, a0, 64
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: li a3, 37
+; RV32-NEXT: li a3, 36
; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 16
; RV32-NEXT: vl4r.v v8, (a2) # Unknown-size Folded Reload
; RV32-NEXT: vse32.v v8, (a1)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 45
+; RV32-NEXT: li a2, 44
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vse32.v v8, (a0)
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 82
+; RV32-NEXT: li a1, 80
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
index 1748315186936..7608349ef7aef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
@@ -549,20 +549,20 @@ define <128 x i1> @buildvec_mask_v128i1() {
define <128 x i1> @buildvec_mask_optsize_v128i1() optsize {
; CHECK-LABEL: buildvec_mask_optsize_v128i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI21_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI21_0)
-; CHECK-NEXT: li a1, 128
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: li a0, 128
+; CHECK-NEXT: lui a1, %hi(.LCPI21_0)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI21_0)
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v0, (a1)
; CHECK-NEXT: ret
;
; ZVE32F-LABEL: buildvec_mask_optsize_v128i1:
; ZVE32F: # %bb.0:
-; ZVE32F-NEXT: lui a0, %hi(.LCPI21_0)
-; ZVE32F-NEXT: addi a0, a0, %lo(.LCPI21_0)
-; ZVE32F-NEXT: li a1, 128
-; ZVE32F-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; ZVE32F-NEXT: vlm.v v0, (a0)
+; ZVE32F-NEXT: li a0, 128
+; ZVE32F-NEXT: lui a1, %hi(.LCPI21_0)
+; ZVE32F-NEXT: addi a1, a1, %lo(.LCPI21_0)
+; ZVE32F-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; ZVE32F-NEXT: vlm.v v0, (a1)
; ZVE32F-NEXT: ret
ret <128 x i1> <i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 0, i1 1, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 1, i1 1, i1 1>
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index db0969c85a8e2..69341981288b9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -13327,22 +13327,22 @@ define <8 x i16> @mgather_shuffle_rotate(ptr %base) {
define <8 x i16> @mgather_shuffle_vrgather(ptr %base) {
; RV32-LABEL: mgather_shuffle_vrgather:
; RV32: # %bb.0:
+; RV32-NEXT: lui a1, %hi(.LCPI119_0)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI119_0)
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: lui a0, %hi(.LCPI119_0)
-; RV32-NEXT: addi a0, a0, %lo(.LCPI119_0)
+; RV32-NEXT: vle16.v v9, (a1)
; RV32-NEXT: vle16.v v10, (a0)
-; RV32-NEXT: vrgather.vv v8, v9, v10
+; RV32-NEXT: vrgather.vv v8, v10, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_shuffle_vrgather:
; RV64V: # %bb.0:
+; RV64V-NEXT: lui a1, %hi(.LCPI119_0)
+; RV64V-NEXT: addi a1, a1, %lo(.LCPI119_0)
; RV64V-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64V-NEXT: vle16.v v9, (a0)
-; RV64V-NEXT: lui a0, %hi(.LCPI119_0)
-; RV64V-NEXT: addi a0, a0, %lo(.LCPI119_0)
+; RV64V-NEXT: vle16.v v9, (a1)
; RV64V-NEXT: vle16.v v10, (a0)
-; RV64V-NEXT: vrgather.vv v8, v9, v10
+; RV64V-NEXT: vrgather.vv v8, v10, v9
; RV64V-NEXT: ret
;
; RV64ZVE32F-LABEL: mgather_shuffle_vrgather:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
index d70ed2fb0e266..4b1f0beb48700 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
@@ -228,11 +228,11 @@ define <16 x i8> @reverse_v16i8(<16 x i8> %a) {
define <32 x i8> @reverse_v32i8(<32 x i8> %a) {
; CHECK-LABEL: reverse_v32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI12_0)
-; CHECK-NEXT: li a1, 32
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vle8.v v12, (a0)
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI12_0)
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v12, (a1)
; CHECK-NEXT: vrgather.vv v10, v8, v12
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -243,11 +243,11 @@ define <32 x i8> @reverse_v32i8(<32 x i8> %a) {
define <64 x i8> @reverse_v64i8(<64 x i8> %a) {
; CHECK-LABEL: reverse_v64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI13_0)
-; CHECK-NEXT: li a1, 64
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vle8.v v16, (a0)
+; CHECK-NEXT: li a0, 64
+; CHECK-NEXT: lui a1, %hi(.LCPI13_0)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI13_0)
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vle8.v v16, (a1)
; CHECK-NEXT: vrgather.vv v12, v8, v16
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
@@ -323,11 +323,11 @@ define <16 x i16> @reverse_v16i16(<16 x i16> %a) {
define <32 x i16> @reverse_v32i16(<32 x i16> %a) {
; CHECK-LABEL: reverse_v32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI19_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI19_0)
-; CHECK-NEXT: li a1, 32
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vle8.v v12, (a0)
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: lui a1, %hi(.LCPI19_0)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI19_0)
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vle8.v v12, (a1)
; CHECK-NEXT: vsext.vf2 v16, v12
; CHECK-NEXT: vrgather.vv v12, v8, v16
; CHECK-NEXT: vmv.v.v v8, v12
@@ -520,11 +520,11 @@ define <16 x half> @reverse_v16f16(<16 x half> %a) {
define <32 x half> @reverse_v32f16(<32 x half> %a) {
; CHECK-LABEL: reverse_v32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI34_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI34_0)
-; CHECK-NEXT: li a1, 32
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vle8.v v12, (a0)
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: lui a1, %hi(.LCPI34_0)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI34_0)
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vle8.v v12, (a1)
; CHECK-NEXT: vsext.vf2 v16, v12
; CHECK-NEXT: vrgather.vv v12, v8, v16
; CHECK-NEXT: vmv.v.v v8, v12
@@ -820,33 +820,33 @@ define <6 x i64> @reverse_v6i64(<6 x i64> %a) {
define <12 x i64> @reverse_v12i64(<12 x i64> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_v12i64:
; RV32-BITS-UNKNOWN: # %bb.0:
-; RV32-BITS-UNKNOWN-NEXT: lui a0, %hi(.LCPI46_0)
-; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, %lo(.LCPI46_0)
-; RV32-BITS-UNKNOWN-NEXT: li a1, 32
-; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; RV32-BITS-UNKNOWN-NEXT: vle16.v v24, (a0)
+; RV32-BITS-UNKNOWN-NEXT: li a0, 32
+; RV32-BITS-UNKNOWN-NEXT: lui a1, %hi(.LCPI46_0)
+; RV32-BITS-UNKNOWN-NEXT: addi a1, a1, %lo(.LCPI46_0)
+; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-BITS-UNKNOWN-NEXT: vle16.v v24, (a1)
; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v16, v8, v24
; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v16
; RV32-BITS-UNKNOWN-NEXT: ret
;
; RV32-BITS-256-LABEL: reverse_v12i64:
; RV32-BITS-256: # %bb.0:
-; RV32-BITS-256-NEXT: lui a0, %hi(.LCPI46_0)
-; RV32-BITS-256-NEXT: addi a0, a0, %lo(.LCPI46_0)
-; RV32-BITS-256-NEXT: li a1, 32
-; RV32-BITS-256-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; RV32-BITS-256-NEXT: vle16.v v24, (a0)
+; RV32-BITS-256-NEXT: li a0, 32
+; RV32-BITS-256-NEXT: lui a1, %hi(.LCPI46_0)
+; RV32-BITS-256-NEXT: addi a1, a1, %lo(.LCPI46_0)
+; RV32-BITS-256-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-BITS-256-NEXT: vle16.v v24, (a1)
; RV32-BITS-256-NEXT: vrgatherei16.vv v16, v8, v24
; RV32-BITS-256-NEXT: vmv.v.v v8, v16
; RV32-BITS-256-NEXT: ret
;
; RV32-BITS-512-LABEL: reverse_v12i64:
; RV32-BITS-512: # %bb.0:
-; RV32-BITS-512-NEXT: lui a0, %hi(.LCPI46_0)
-; RV32-BITS-512-NEXT: addi a0, a0, %lo(.LCPI46_0)
-; RV32-BITS-512-NEXT: li a1, 32
-; RV32-BITS-512-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; RV32-BITS-512-NEXT: vle16.v v24, (a0)
+; RV32-BITS-512-NEXT: li a0, 32
+; RV32-BITS-512-NEXT: lui a1, %hi(.LCPI46_0)
+; RV32-BITS-512-NEXT: addi a1, a1, %lo(.LCPI46_0)
+; RV32-BITS-512-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-BITS-512-NEXT: vle16.v v24, (a1)
; RV32-BITS-512-NEXT: vrgatherei16.vv v16, v8, v24
; RV32-BITS-512-NEXT: vmv.v.v v8, v16
; RV32-BITS-512-NEXT: ret
@@ -883,11 +883,11 @@ define <12 x i64> @reverse_v12i64(<12 x i64> %a) {
;
; RV32-ZVBB-LABEL: reverse_v12i64:
; RV32-ZVBB: # %bb.0:
-; RV32-ZVBB-NEXT: lui a0, %hi(.LCPI46_0)
-; RV32-ZVBB-NEXT: addi a0, a0, %lo(.LCPI46_0)
-; RV32-ZVBB-NEXT: li a1, 32
-; RV32-ZVBB-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; RV32-ZVBB-NEXT: vle16.v v24, (a0)
+; RV32-ZVBB-NEXT: li a0, 32
+; RV32-ZVBB-NEXT: lui a1, %hi(.LCPI46_0)
+; RV32-ZVBB-NEXT: addi a1, a1, %lo(.LCPI46_0)
+; RV32-ZVBB-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-ZVBB-NEXT: vle16.v v24, (a1)
; RV32-ZVBB-NEXT: vrgatherei16.vv v16, v8, v24
; RV32-ZVBB-NEXT: vmv.v.v v8, v16
; RV32-ZVBB-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll
index 0161ac4bc338d..e2580c132f65e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll
@@ -225,11 +225,11 @@ declare <16 x i64> @llvm.experimental.stepvector.v16i64()
define <16 x i64> @stepvector_v16i64() {
; RV32-LABEL: stepvector_v16i64:
; RV32: # %bb.0:
-; RV32-NEXT: lui a0, %hi(.LCPI16_0)
-; RV32-NEXT: addi a0, a0, %lo(.LCPI16_0)
-; RV32-NEXT: li a1, 32
-; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; RV32-NEXT: vle8.v v16, (a0)
+; RV32-NEXT: li a0, 32
+; RV32-NEXT: lui a1, %hi(.LCPI16_0)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI16_0)
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vle8.v v16, (a1)
; RV32-NEXT: vsext.vf4 v8, v16
; RV32-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store-merge-crash.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store-merge-crash.ll
index 391117c72ece7..9a11355053a73 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store-merge-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store-merge-crash.ll
@@ -12,14 +12,14 @@
define void @baz() nounwind {
; CHECK-LABEL: baz:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: lui a0, %hi(foo)
-; CHECK-NEXT: addi a1, a0, %lo(foo)
-; CHECK-NEXT: lw a1, 4(a1)
-; CHECK-NEXT: lw a0, %lo(foo)(a0)
+; CHECK-NEXT: lui a0, %hi(foo+4)
+; CHECK-NEXT: lw a0, %lo(foo+4)(a0)
+; CHECK-NEXT: lui a1, %hi(foo)
+; CHECK-NEXT: lw a1, %lo(foo)(a1)
; CHECK-NEXT: lui a2, %hi(bar)
-; CHECK-NEXT: sw a1, %lo(bar)(a2)
-; CHECK-NEXT: addi a1, a2, %lo(bar)
-; CHECK-NEXT: sw a0, 4(a1)
+; CHECK-NEXT: sw a0, %lo(bar)(a2)
+; CHECK-NEXT: lui a0, %hi(bar+4)
+; CHECK-NEXT: sw a1, %lo(bar+4)(a0)
; CHECK-NEXT: ret
entry:
%0 = load i32, ptr getelementptr inbounds ([2 x i32], ptr @foo, i64 0, i64 1), align 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll
index 6e327457bebff..368f454fa5fda 100644
--- a/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll
@@ -106,11 +106,11 @@ define <16 x i8> @v16i8(<16 x i8> %a) {
define <32 x i8> @v16i8_2(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: v16i8_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI7_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI7_0)
-; CHECK-NEXT: li a1, 32
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vle8.v v12, (a0)
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: lui a1, %hi(.LCPI7_0)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI7_0)
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v12, (a1)
; CHECK-NEXT: vmv1r.v v14, v9
; CHECK-NEXT: vrgather.vv v10, v8, v12
; CHECK-NEXT: vid.v v8
@@ -230,11 +230,11 @@ define <16 x i16> @v16i16(<16 x i16> %a) {
define <32 x i16> @v16i16_2(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: v16i16_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI15_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI15_0)
-; CHECK-NEXT: li a1, 32
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vle16.v v16, (a0)
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: lui a1, %hi(.LCPI15_0)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI15_0)
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vle16.v v16, (a1)
; CHECK-NEXT: vmv2r.v v20, v10
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vrgather.vv v8, v12, v16
@@ -363,11 +363,11 @@ define <16 x i32> @v16i32(<16 x i32> %a) {
define <32 x i32> @v16i32_2(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: v16i32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI23_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI23_0)
-; CHECK-NEXT: li a1, 32
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vle16.v v20, (a0)
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: lui a1, %hi(.LCPI23_0)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI23_0)
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vle16.v v20, (a1)
; CHECK-NEXT: vmv4r.v v24, v12
; CHECK-NEXT: vmv4r.v v16, v8
; CHECK-NEXT: vrgatherei16.vv v8, v16, v20
@@ -548,11 +548,11 @@ define <16 x half> @v16f16(<16 x half> %a) {
define <32 x half> @v16f16_2(<16 x half> %a) {
; CHECK-LABEL: v16f16_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI35_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI35_0)
-; CHECK-NEXT: li a1, 32
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vle16.v v16, (a0)
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: lui a1, %hi(.LCPI35_0)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI35_0)
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vle16.v v16, (a1)
; CHECK-NEXT: vrgather.vv v12, v8, v16
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
@@ -719,11 +719,11 @@ define <8 x double> @v4f64_2(<4 x double> %a, <4 x double> %b) {
define <32 x i8> @v32i8(<32 x i8> %a) {
; CHECK-LABEL: v32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI46_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI46_0)
-; CHECK-NEXT: li a1, 32
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vle8.v v12, (a0)
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: lui a1, %hi(.LCPI46_0)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI46_0)
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v12, (a1)
; CHECK-NEXT: vrgather.vv v10, v8, v12
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/tail-calls.ll b/llvm/test/CodeGen/RISCV/tail-calls.ll
index 87d69bfad38c2..d3e495bb723ad 100644
--- a/llvm/test/CodeGen/RISCV/tail-calls.ll
+++ b/llvm/test/CodeGen/RISCV/tail-calls.ll
@@ -56,12 +56,12 @@ define void @caller_indirect_tail(i32 %a) nounwind {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: beqz a0, .LBB3_2
; CHECK-NEXT: # %bb.1: # %entry
-; CHECK-NEXT: lui a0, %hi(callee_indirect2)
-; CHECK-NEXT: addi t1, a0, %lo(callee_indirect2)
+; CHECK-NEXT: lui t1, %hi(callee_indirect2)
+; CHECK-NEXT: addi t1, t1, %lo(callee_indirect2)
; CHECK-NEXT: jr t1
; CHECK-NEXT: .LBB3_2:
-; CHECK-NEXT: lui a0, %hi(callee_indirect1)
-; CHECK-NEXT: addi t1, a0, %lo(callee_indirect1)
+; CHECK-NEXT: lui t1, %hi(callee_indirect1)
+; CHECK-NEXT: addi t1, t1, %lo(callee_indirect1)
; CHECK-NEXT: jr t1
diff --git a/llvm/test/CodeGen/RISCV/unroll-loop-cse.ll b/llvm/test/CodeGen/RISCV/unroll-loop-cse.ll
index 2fd4572d23456..6530736304837 100644
--- a/llvm/test/CodeGen/RISCV/unroll-loop-cse.ll
+++ b/llvm/test/CodeGen/RISCV/unroll-loop-cse.ll
@@ -10,36 +10,30 @@
define signext i32 @unroll_loop_cse() {
; CHECK-LABEL: unroll_loop_cse:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(x)
-; CHECK-NEXT: lw a3, %lo(x)(a1)
-; CHECK-NEXT: lui a2, %hi(check)
-; CHECK-NEXT: lw a4, %lo(check)(a2)
+; CHECK-NEXT: lui a0, %hi(x)
+; CHECK-NEXT: lw a1, %lo(x)(a0)
+; CHECK-NEXT: lui a0, %hi(check)
+; CHECK-NEXT: lw a2, %lo(check)(a0)
; CHECK-NEXT: li a0, 1
-; CHECK-NEXT: bne a3, a4, .LBB0_6
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: addi a1, a1, %lo(x)
-; CHECK-NEXT: lw a1, 4(a1)
-; CHECK-NEXT: addi a2, a2, %lo(check)
-; CHECK-NEXT: lw a2, 4(a2)
; CHECK-NEXT: bne a1, a2, .LBB0_6
-; CHECK-NEXT: # %bb.2:
+; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: lui a1, %hi(x)
; CHECK-NEXT: addi a1, a1, %lo(x)
-; CHECK-NEXT: lw a3, 8(a1)
+; CHECK-NEXT: lw a3, 4(a1)
; CHECK-NEXT: lui a2, %hi(check)
; CHECK-NEXT: addi a2, a2, %lo(check)
+; CHECK-NEXT: lw a4, 4(a2)
+; CHECK-NEXT: bne a3, a4, .LBB0_6
+; CHECK-NEXT: # %bb.2:
+; CHECK-NEXT: lw a3, 8(a1)
; CHECK-NEXT: lw a4, 8(a2)
; CHECK-NEXT: bne a3, a4, .LBB0_6
; CHECK-NEXT: # %bb.3:
-; CHECK-NEXT: lw a1, 12(a1)
-; CHECK-NEXT: lw a2, 12(a2)
-; CHECK-NEXT: bne a1, a2, .LBB0_6
+; CHECK-NEXT: lw a3, 12(a1)
+; CHECK-NEXT: lw a4, 12(a2)
+; CHECK-NEXT: bne a3, a4, .LBB0_6
; CHECK-NEXT: # %bb.4:
-; CHECK-NEXT: lui a1, %hi(x)
-; CHECK-NEXT: addi a1, a1, %lo(x)
; CHECK-NEXT: lw a3, 16(a1)
-; CHECK-NEXT: lui a2, %hi(check)
-; CHECK-NEXT: addi a2, a2, %lo(check)
; CHECK-NEXT: lw a4, 16(a2)
; CHECK-NEXT: bne a3, a4, .LBB0_6
; CHECK-NEXT: # %bb.5:
diff --git a/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll b/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll
index 771a72f8d55f0..c4a94980f527f 100644
--- a/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll
+++ b/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll
@@ -10,12 +10,12 @@ define dso_local i32 @test_zext_i8() nounwind {
; RV32I-LABEL: test_zext_i8:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: lui a0, %hi(bytes)
-; RV32I-NEXT: lbu a1, %lo(bytes)(a0)
-; RV32I-NEXT: li a2, 136
-; RV32I-NEXT: bne a1, a2, .LBB0_3
+; RV32I-NEXT: lbu a0, %lo(bytes)(a0)
+; RV32I-NEXT: li a1, 136
+; RV32I-NEXT: bne a0, a1, .LBB0_3
; RV32I-NEXT: # %bb.1: # %entry
-; RV32I-NEXT: addi a0, a0, %lo(bytes)
-; RV32I-NEXT: lbu a0, 1(a0)
+; RV32I-NEXT: lui a0, %hi(bytes+1)
+; RV32I-NEXT: lbu a0, %lo(bytes+1)(a0)
; RV32I-NEXT: li a1, 7
; RV32I-NEXT: bne a0, a1, .LBB0_3
; RV32I-NEXT: # %bb.2: # %if.end
@@ -45,13 +45,13 @@ define dso_local i32 @test_zext_i16() nounwind {
; RV32I-LABEL: test_zext_i16:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: lui a0, %hi(shorts)
-; RV32I-NEXT: lhu a1, %lo(shorts)(a0)
-; RV32I-NEXT: lui a2, 16
-; RV32I-NEXT: addi a2, a2, -120
-; RV32I-NEXT: bne a1, a2, .LBB1_3
+; RV32I-NEXT: lhu a0, %lo(shorts)(a0)
+; RV32I-NEXT: lui a1, 16
+; RV32I-NEXT: addi a1, a1, -120
+; RV32I-NEXT: bne a0, a1, .LBB1_3
; RV32I-NEXT: # %bb.1: # %entry
-; RV32I-NEXT: addi a0, a0, %lo(shorts)
-; RV32I-NEXT: lhu a0, 2(a0)
+; RV32I-NEXT: lui a0, %hi(shorts+2)
+; RV32I-NEXT: lhu a0, %lo(shorts+2)(a0)
; RV32I-NEXT: li a1, 7
; RV32I-NEXT: bne a0, a1, .LBB1_3
; RV32I-NEXT: # %bb.2: # %if.end