[llvm] a2b5b58 - [RISCV] Use register allocation hints to improve use of compressed instructions.

Craig Topper via llvm-commits llvm-commits@lists.llvm.org
Fri Nov 25 08:39:55 PST 2022


Author: Craig Topper
Date: 2022-11-25T08:39:44-08:00
New Revision: a2b5b584a576b253165e17e32d8c5627021ac881

URL: https://github.com/llvm/llvm-project/commit/a2b5b584a576b253165e17e32d8c5627021ac881
DIFF: https://github.com/llvm/llvm-project/commit/a2b5b584a576b253165e17e32d8c5627021ac881.diff

LOG: [RISCV] Use register allocation hints to improve use of compressed instructions.

Compressed instructions usually require one of the source registers
to also be the destination register. The register allocator doesn't
have that bias on its own.

This patch adds register allocation hints to introduce this bias.
I've started with ADDI, ADDIW, and SLLI. The compressed forms of these
all have a full 5-bit register field, so any GPR can be used. If the
source and destination register are the same, they are guaranteed to
compress as long as the immediate also fits in 6 bits.
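
As a small illustration (mine, not taken from the tests below): the
16-bit forms only exist when the destination is also the first source,
e.g.

  addi a1, a0, 1   # rd != rs1: stays a 4-byte addi
  addi a0, a0, 1   # rd == rs1, 6-bit imm: compresses to c.addi a0, 1
  slli a0, a0, 3   # rd == rs1: compresses to c.slli a0, 3

The hint nudges the allocator toward the second and third patterns
instead of picking a fresh destination register.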

This code was inspired by similar code from the SystemZ target.

Reviewed By: reames

Differential Revision: https://reviews.llvm.org/D138242

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
    llvm/lib/Target/RISCV/RISCVRegisterInfo.h
    llvm/test/CodeGen/RISCV/add-before-shl.ll
    llvm/test/CodeGen/RISCV/addcarry.ll
    llvm/test/CodeGen/RISCV/aext-to-sext.ll
    llvm/test/CodeGen/RISCV/and.ll
    llvm/test/CodeGen/RISCV/atomic-rmw.ll
    llvm/test/CodeGen/RISCV/atomic-signext.ll
    llvm/test/CodeGen/RISCV/bittest.ll
    llvm/test/CodeGen/RISCV/branch-relaxation.ll
    llvm/test/CodeGen/RISCV/calling-conv-half.ll
    llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll
    llvm/test/CodeGen/RISCV/double-convert.ll
    llvm/test/CodeGen/RISCV/float-arith.ll
    llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
    llvm/test/CodeGen/RISCV/float-convert.ll
    llvm/test/CodeGen/RISCV/fold-vector-cmp.ll
    llvm/test/CodeGen/RISCV/forced-atomics.ll
    llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
    llvm/test/CodeGen/RISCV/half-arith.ll
    llvm/test/CodeGen/RISCV/half-convert.ll
    llvm/test/CodeGen/RISCV/imm.ll
    llvm/test/CodeGen/RISCV/narrow-shl-cst.ll
    llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll
    llvm/test/CodeGen/RISCV/rotl-rotr.ll
    llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
    llvm/test/CodeGen/RISCV/rv32zbkb.ll
    llvm/test/CodeGen/RISCV/rv32zbs.ll
    llvm/test/CodeGen/RISCV/rv64i-complex-float.ll
    llvm/test/CodeGen/RISCV/rv64zbkb.ll
    llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
    llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
    llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
    llvm/test/CodeGen/RISCV/rvv/vpstore.ll
    llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
    llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
    llvm/test/CodeGen/RISCV/shifts.ll
    llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
    llvm/test/CodeGen/RISCV/unaligned-load-store.ll
    llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
    llvm/test/CodeGen/RISCV/vararg.ll
    llvm/test/CodeGen/RISCV/xaluo.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 38a6c7f8ed1ae..7c42eb1ea221a 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -29,6 +29,12 @@
 
 using namespace llvm;
 
+static cl::opt<bool>
+    DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
+                         cl::init(false),
+                         cl::desc("Disable two address hints for register "
+                                  "allocation"));
+
 static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
 static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
 static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
@@ -376,3 +382,70 @@ unsigned
 RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
   return MF.getSubtarget<RISCVSubtarget>().hasStdExtC() ? 1 : 0;
 }
+
+// Add two address hints to improve chances of being able to use a compressed
+// instruction.
+bool RISCVRegisterInfo::getRegAllocationHints(
+    Register VirtReg, ArrayRef<MCPhysReg> Order,
+    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
+    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
+  const MachineRegisterInfo *MRI = &MF.getRegInfo();
+
+  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
+      VirtReg, Order, Hints, MF, VRM, Matrix);
+
+  if (!VRM || DisableRegAllocHints)
+    return BaseImplRetVal;
+
+  // Add any two address hints after any copy hints.
+  SmallSet<unsigned, 4> TwoAddrHints;
+
+  auto tryAddHint = [&](const MachineOperand &VRRegMO,
+                        const MachineOperand &MO) -> void {
+    Register Reg = MO.getReg();
+    Register PhysReg =
+        Register::isPhysicalRegister(Reg) ? Reg : Register(VRM->getPhys(Reg));
+    if (PhysReg) {
+      assert(!MO.getSubReg() && !VRRegMO.getSubReg() && "Unexpected subreg!");
+      if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
+        TwoAddrHints.insert(PhysReg);
+    }
+  };
+
+  // For now we support the compressible instructions which can encode all
+  // registers and have a single register source.
+  // TODO: Add more compressed instructions.
+  auto isCompressible = [](const MachineInstr &MI) {
+    switch (MI.getOpcode()) {
+    default:
+      return false;
+    case RISCV::SLLI:
+      return true;
+    case RISCV::ADDI:
+    case RISCV::ADDIW:
+      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
+    }
+  };
+
+  for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
+    const MachineInstr &MI = *MO.getParent();
+    if (isCompressible(MI)) {
+      unsigned OpIdx = MI.getOperandNo(&MO);
+      if (OpIdx == 0 && MI.getOperand(1).isReg()) {
+        tryAddHint(MO, MI.getOperand(1));
+        if (MI.isCommutable() && MI.getOperand(2).isReg())
+          tryAddHint(MO, MI.getOperand(2));
+      } else if (OpIdx == 1) {
+        tryAddHint(MO, MI.getOperand(0));
+      } else if (MI.isCommutable() && OpIdx == 2) {
+        tryAddHint(MO, MI.getOperand(0));
+      }
+    }
+  }
+
+  for (MCPhysReg OrderReg : Order)
+    if (TwoAddrHints.count(OrderReg))
+      Hints.push_back(OrderReg);
+
+  return BaseImplRetVal;
+}
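
To eyeball the effect of this hunk, the hidden flag added above gives an
easy A/B comparison. A rough sketch (the input file name is made up; any
function with addi/addiw/slli chains will do):

  llc -mtriple=riscv64 -mattr=+c test.ll -o with-hints.s
  llc -mtriple=riscv64 -mattr=+c -riscv-disable-regalloc-hints test.ll -o without-hints.s

With the hints, more addi/addiw/slli results should land in their source
register and assemble to the 2-byte c.addi/c.addiw/c.slli forms.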

diff  --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
index aa199de38d26b..43911477d0a6d 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
@@ -66,6 +66,11 @@ struct RISCVRegisterInfo : public RISCVGenRegisterInfo {
                         SmallVectorImpl<uint64_t> &Ops) const override;
 
   unsigned getRegisterCostTableIndex(const MachineFunction &MF) const override;
+
+  bool getRegAllocationHints(Register VirtReg, ArrayRef<MCPhysReg> Order,
+                             SmallVectorImpl<MCPhysReg> &Hints,
+                             const MachineFunction &MF, const VirtRegMap *VRM,
+                             const LiveRegMatrix *Matrix) const override;
 };
 }
 

diff  --git a/llvm/test/CodeGen/RISCV/add-before-shl.ll b/llvm/test/CodeGen/RISCV/add-before-shl.ll
index a977cad504d3e..dd20dd25cbfaa 100644
--- a/llvm/test/CodeGen/RISCV/add-before-shl.ll
+++ b/llvm/test/CodeGen/RISCV/add-before-shl.ll
@@ -196,7 +196,7 @@ define i128 @add_wide_operand(i128 %a) nounwind {
 ;
 ; RV32C-LABEL: add_wide_operand:
 ; RV32C:       # %bb.0:
-; RV32C-NEXT:    c.lw a2, 4(a1)
+; RV32C-NEXT:    lw a6, 4(a1)
 ; RV32C-NEXT:    c.lw a3, 12(a1)
 ; RV32C-NEXT:    c.lw a4, 0(a1)
 ; RV32C-NEXT:    c.lw a1, 8(a1)
@@ -204,18 +204,18 @@ define i128 @add_wide_operand(i128 %a) nounwind {
 ; RV32C-NEXT:    c.add a3, a5
 ; RV32C-NEXT:    c.slli a3, 3
 ; RV32C-NEXT:    srli a5, a1, 29
-; RV32C-NEXT:    or a6, a3, a5
-; RV32C-NEXT:    srli a5, a4, 29
-; RV32C-NEXT:    slli a3, a2, 3
 ; RV32C-NEXT:    c.or a3, a5
-; RV32C-NEXT:    c.srli a2, 29
+; RV32C-NEXT:    srli a5, a4, 29
+; RV32C-NEXT:    slli a2, a6, 3
+; RV32C-NEXT:    c.or a2, a5
+; RV32C-NEXT:    srli a5, a6, 29
 ; RV32C-NEXT:    c.slli a1, 3
-; RV32C-NEXT:    c.or a1, a2
-; RV32C-NEXT:    slli a2, a4, 3
-; RV32C-NEXT:    c.sw a2, 0(a0)
+; RV32C-NEXT:    c.or a1, a5
+; RV32C-NEXT:    c.slli a4, 3
+; RV32C-NEXT:    c.sw a4, 0(a0)
 ; RV32C-NEXT:    c.sw a1, 8(a0)
-; RV32C-NEXT:    c.sw a3, 4(a0)
-; RV32C-NEXT:    sw a6, 12(a0)
+; RV32C-NEXT:    c.sw a2, 4(a0)
+; RV32C-NEXT:    c.sw a3, 12(a0)
 ; RV32C-NEXT:    c.jr ra
 ;
 ; RV64C-LABEL: add_wide_operand:

diff  --git a/llvm/test/CodeGen/RISCV/addcarry.ll b/llvm/test/CodeGen/RISCV/addcarry.ll
index 44d1f96758535..053b98755417b 100644
--- a/llvm/test/CodeGen/RISCV/addcarry.ll
+++ b/llvm/test/CodeGen/RISCV/addcarry.ll
@@ -32,13 +32,13 @@ define i64 @addcarry(i64 %x, i64 %y) nounwind {
 ; RISCV32-NEXT:  # %bb.3:
 ; RISCV32-NEXT:    sub a5, a5, a0
 ; RISCV32-NEXT:  .LBB0_4:
-; RISCV32-NEXT:    slli a1, a5, 30
-; RISCV32-NEXT:    srli a3, a4, 2
-; RISCV32-NEXT:    or a1, a1, a3
-; RISCV32-NEXT:    slli a3, a4, 30
+; RISCV32-NEXT:    slli a5, a5, 30
+; RISCV32-NEXT:    srli a1, a4, 2
+; RISCV32-NEXT:    or a1, a5, a1
+; RISCV32-NEXT:    slli a4, a4, 30
 ; RISCV32-NEXT:    mul a0, a0, a2
 ; RISCV32-NEXT:    srli a0, a0, 2
-; RISCV32-NEXT:    or a0, a3, a0
+; RISCV32-NEXT:    or a0, a4, a0
 ; RISCV32-NEXT:    ret
   %tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 2);
   ret i64 %tmp;

diff  --git a/llvm/test/CodeGen/RISCV/aext-to-sext.ll b/llvm/test/CodeGen/RISCV/aext-to-sext.ll
index 806c495fa6777..fb09b7cc21fd7 100644
--- a/llvm/test/CodeGen/RISCV/aext-to-sext.ll
+++ b/llvm/test/CodeGen/RISCV/aext-to-sext.ll
@@ -82,8 +82,8 @@ define i64 @sext_phi_constants(i32 signext %c) {
 ; RV64I-NEXT:  # %bb.1: # %iffalse
 ; RV64I-NEXT:    li a1, -2
 ; RV64I-NEXT:  .LBB2_2: # %merge
-; RV64I-NEXT:    slli a0, a1, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    srli a0, a1, 32
 ; RV64I-NEXT:    ret
   %a = icmp ne i32 %c, 0
   br i1 %a, label %iftrue, label %iffalse

diff  --git a/llvm/test/CodeGen/RISCV/and.ll b/llvm/test/CodeGen/RISCV/and.ll
index d1d2e6a0fadc8..5eff422013da6 100644
--- a/llvm/test/CodeGen/RISCV/and.ll
+++ b/llvm/test/CodeGen/RISCV/and.ll
@@ -187,8 +187,8 @@ define i64 @and64_0xffffffff00000000(i64 %x) {
 define i64 @and64_0x7fffffff00000000(i64 %x) {
 ; RV32I-LABEL: and64_0x7fffffff00000000:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a0, a1, 1
-; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 ;

diff  --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
index 42f8d41e527f4..c34ee47ec8788 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
@@ -8350,8 +8350,8 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
-; RV32I-NEXT:    lui a0, 16
-; RV32I-NEXT:    addi s2, a0, -1
+; RV32I-NEXT:    lui s2, 16
+; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
 ; RV32I-NEXT:    j .LBB100_2
 ; RV32I-NEXT:  .LBB100_1: # %atomicrmw.start
@@ -8419,8 +8419,8 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
-; RV64I-NEXT:    lui a0, 16
-; RV64I-NEXT:    addiw s2, a0, -1
+; RV64I-NEXT:    lui s2, 16
+; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
 ; RV64I-NEXT:    j .LBB100_2
 ; RV64I-NEXT:  .LBB100_1: # %atomicrmw.start
@@ -8492,8 +8492,8 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
-; RV32I-NEXT:    lui a0, 16
-; RV32I-NEXT:    addi s2, a0, -1
+; RV32I-NEXT:    lui s2, 16
+; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
 ; RV32I-NEXT:    j .LBB101_2
 ; RV32I-NEXT:  .LBB101_1: # %atomicrmw.start
@@ -8561,8 +8561,8 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
-; RV64I-NEXT:    lui a0, 16
-; RV64I-NEXT:    addiw s2, a0, -1
+; RV64I-NEXT:    lui s2, 16
+; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
 ; RV64I-NEXT:    j .LBB101_2
 ; RV64I-NEXT:  .LBB101_1: # %atomicrmw.start
@@ -8634,8 +8634,8 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
-; RV32I-NEXT:    lui a0, 16
-; RV32I-NEXT:    addi s2, a0, -1
+; RV32I-NEXT:    lui s2, 16
+; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
 ; RV32I-NEXT:    j .LBB102_2
 ; RV32I-NEXT:  .LBB102_1: # %atomicrmw.start
@@ -8703,8 +8703,8 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
-; RV64I-NEXT:    lui a0, 16
-; RV64I-NEXT:    addiw s2, a0, -1
+; RV64I-NEXT:    lui s2, 16
+; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
 ; RV64I-NEXT:    j .LBB102_2
 ; RV64I-NEXT:  .LBB102_1: # %atomicrmw.start
@@ -8776,8 +8776,8 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
-; RV32I-NEXT:    lui a0, 16
-; RV32I-NEXT:    addi s2, a0, -1
+; RV32I-NEXT:    lui s2, 16
+; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
 ; RV32I-NEXT:    j .LBB103_2
 ; RV32I-NEXT:  .LBB103_1: # %atomicrmw.start
@@ -8845,8 +8845,8 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
-; RV64I-NEXT:    lui a0, 16
-; RV64I-NEXT:    addiw s2, a0, -1
+; RV64I-NEXT:    lui s2, 16
+; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
 ; RV64I-NEXT:    j .LBB103_2
 ; RV64I-NEXT:  .LBB103_1: # %atomicrmw.start
@@ -8918,8 +8918,8 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
-; RV32I-NEXT:    lui a0, 16
-; RV32I-NEXT:    addi s2, a0, -1
+; RV32I-NEXT:    lui s2, 16
+; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
 ; RV32I-NEXT:    j .LBB104_2
 ; RV32I-NEXT:  .LBB104_1: # %atomicrmw.start
@@ -8987,8 +8987,8 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
-; RV64I-NEXT:    lui a0, 16
-; RV64I-NEXT:    addiw s2, a0, -1
+; RV64I-NEXT:    lui s2, 16
+; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
 ; RV64I-NEXT:    j .LBB104_2
 ; RV64I-NEXT:  .LBB104_1: # %atomicrmw.start
@@ -9060,8 +9060,8 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
-; RV32I-NEXT:    lui a0, 16
-; RV32I-NEXT:    addi s2, a0, -1
+; RV32I-NEXT:    lui s2, 16
+; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
 ; RV32I-NEXT:    j .LBB105_2
 ; RV32I-NEXT:  .LBB105_1: # %atomicrmw.start
@@ -9129,8 +9129,8 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
-; RV64I-NEXT:    lui a0, 16
-; RV64I-NEXT:    addiw s2, a0, -1
+; RV64I-NEXT:    lui s2, 16
+; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
 ; RV64I-NEXT:    j .LBB105_2
 ; RV64I-NEXT:  .LBB105_1: # %atomicrmw.start
@@ -9202,8 +9202,8 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
-; RV32I-NEXT:    lui a0, 16
-; RV32I-NEXT:    addi s2, a0, -1
+; RV32I-NEXT:    lui s2, 16
+; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
 ; RV32I-NEXT:    j .LBB106_2
 ; RV32I-NEXT:  .LBB106_1: # %atomicrmw.start
@@ -9271,8 +9271,8 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
-; RV64I-NEXT:    lui a0, 16
-; RV64I-NEXT:    addiw s2, a0, -1
+; RV64I-NEXT:    lui s2, 16
+; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
 ; RV64I-NEXT:    j .LBB106_2
 ; RV64I-NEXT:  .LBB106_1: # %atomicrmw.start
@@ -9344,8 +9344,8 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
-; RV32I-NEXT:    lui a0, 16
-; RV32I-NEXT:    addi s2, a0, -1
+; RV32I-NEXT:    lui s2, 16
+; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
 ; RV32I-NEXT:    j .LBB107_2
 ; RV32I-NEXT:  .LBB107_1: # %atomicrmw.start
@@ -9413,8 +9413,8 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
-; RV64I-NEXT:    lui a0, 16
-; RV64I-NEXT:    addiw s2, a0, -1
+; RV64I-NEXT:    lui s2, 16
+; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
 ; RV64I-NEXT:    j .LBB107_2
 ; RV64I-NEXT:  .LBB107_1: # %atomicrmw.start
@@ -9486,8 +9486,8 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
-; RV32I-NEXT:    lui a0, 16
-; RV32I-NEXT:    addi s2, a0, -1
+; RV32I-NEXT:    lui s2, 16
+; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
 ; RV32I-NEXT:    j .LBB108_2
 ; RV32I-NEXT:  .LBB108_1: # %atomicrmw.start
@@ -9555,8 +9555,8 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
-; RV64I-NEXT:    lui a0, 16
-; RV64I-NEXT:    addiw s2, a0, -1
+; RV64I-NEXT:    lui s2, 16
+; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
 ; RV64I-NEXT:    j .LBB108_2
 ; RV64I-NEXT:  .LBB108_1: # %atomicrmw.start
@@ -9628,8 +9628,8 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
-; RV32I-NEXT:    lui a0, 16
-; RV32I-NEXT:    addi s2, a0, -1
+; RV32I-NEXT:    lui s2, 16
+; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
 ; RV32I-NEXT:    j .LBB109_2
 ; RV32I-NEXT:  .LBB109_1: # %atomicrmw.start
@@ -9697,8 +9697,8 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
-; RV64I-NEXT:    lui a0, 16
-; RV64I-NEXT:    addiw s2, a0, -1
+; RV64I-NEXT:    lui s2, 16
+; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
 ; RV64I-NEXT:    j .LBB109_2
 ; RV64I-NEXT:  .LBB109_1: # %atomicrmw.start

diff  --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll
index e893b38873538..16dea0e7accb5 100644
--- a/llvm/test/CodeGen/RISCV/atomic-signext.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll
@@ -609,8 +609,8 @@ define signext i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB10_1
 ; RV32I-NEXT:  .LBB10_4: # %atomicrmw.end
-; RV32I-NEXT:    slli a0, a3, 24
-; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    slli a3, a3, 24
+; RV32I-NEXT:    srai a0, a3, 24
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -683,8 +683,8 @@ define signext i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB10_1
 ; RV64I-NEXT:  .LBB10_4: # %atomicrmw.end
-; RV64I-NEXT:    slli a0, a3, 56
-; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    slli a3, a3, 56
+; RV64I-NEXT:    srai a0, a3, 56
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -761,8 +761,8 @@ define signext i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB11_1
 ; RV32I-NEXT:  .LBB11_4: # %atomicrmw.end
-; RV32I-NEXT:    slli a0, a3, 24
-; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    slli a3, a3, 24
+; RV32I-NEXT:    srai a0, a3, 24
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -835,8 +835,8 @@ define signext i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB11_1
 ; RV64I-NEXT:  .LBB11_4: # %atomicrmw.end
-; RV64I-NEXT:    slli a0, a3, 56
-; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    slli a3, a3, 56
+; RV64I-NEXT:    srai a0, a3, 56
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -911,8 +911,8 @@ define signext i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB12_1
 ; RV32I-NEXT:  .LBB12_4: # %atomicrmw.end
-; RV32I-NEXT:    slli a0, a3, 24
-; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    slli a3, a3, 24
+; RV32I-NEXT:    srai a0, a3, 24
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -978,8 +978,8 @@ define signext i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB12_1
 ; RV64I-NEXT:  .LBB12_4: # %atomicrmw.end
-; RV64I-NEXT:    slli a0, a3, 56
-; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    slli a3, a3, 56
+; RV64I-NEXT:    srai a0, a3, 56
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -1049,8 +1049,8 @@ define signext i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB13_1
 ; RV32I-NEXT:  .LBB13_4: # %atomicrmw.end
-; RV32I-NEXT:    slli a0, a3, 24
-; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    slli a3, a3, 24
+; RV32I-NEXT:    srai a0, a3, 24
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1116,8 +1116,8 @@ define signext i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB13_1
 ; RV64I-NEXT:  .LBB13_4: # %atomicrmw.end
-; RV64I-NEXT:    slli a0, a3, 56
-; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    slli a3, a3, 56
+; RV64I-NEXT:    srai a0, a3, 56
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -1657,8 +1657,8 @@ define signext i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB21_1
 ; RV32I-NEXT:  .LBB21_4: # %atomicrmw.end
-; RV32I-NEXT:    slli a0, a3, 16
-; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    slli a3, a3, 16
+; RV32I-NEXT:    srai a0, a3, 16
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1733,8 +1733,8 @@ define signext i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB21_1
 ; RV64I-NEXT:  .LBB21_4: # %atomicrmw.end
-; RV64I-NEXT:    slli a0, a3, 48
-; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    slli a3, a3, 48
+; RV64I-NEXT:    srai a0, a3, 48
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -1813,8 +1813,8 @@ define signext i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB22_1
 ; RV32I-NEXT:  .LBB22_4: # %atomicrmw.end
-; RV32I-NEXT:    slli a0, a3, 16
-; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    slli a3, a3, 16
+; RV32I-NEXT:    srai a0, a3, 16
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1889,8 +1889,8 @@ define signext i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB22_1
 ; RV64I-NEXT:  .LBB22_4: # %atomicrmw.end
-; RV64I-NEXT:    slli a0, a3, 48
-; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    slli a3, a3, 48
+; RV64I-NEXT:    srai a0, a3, 48
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -1946,8 +1946,8 @@ define signext i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
-; RV32I-NEXT:    lui a0, 16
-; RV32I-NEXT:    addi s2, a0, -1
+; RV32I-NEXT:    lui s2, 16
+; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
 ; RV32I-NEXT:    j .LBB23_2
 ; RV32I-NEXT:  .LBB23_1: # %atomicrmw.start
@@ -1970,8 +1970,8 @@ define signext i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a2, s0
 ; RV32I-NEXT:    j .LBB23_1
 ; RV32I-NEXT:  .LBB23_4: # %atomicrmw.end
-; RV32I-NEXT:    slli a0, a1, 16
-; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai a0, a1, 16
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -2018,8 +2018,8 @@ define signext i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
-; RV64I-NEXT:    lui a0, 16
-; RV64I-NEXT:    addiw s2, a0, -1
+; RV64I-NEXT:    lui s2, 16
+; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
 ; RV64I-NEXT:    j .LBB23_2
 ; RV64I-NEXT:  .LBB23_1: # %atomicrmw.start
@@ -2042,8 +2042,8 @@ define signext i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a2, s0
 ; RV64I-NEXT:    j .LBB23_1
 ; RV64I-NEXT:  .LBB23_4: # %atomicrmw.end
-; RV64I-NEXT:    slli a0, a1, 48
-; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai a0, a1, 48
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -2094,8 +2094,8 @@ define signext i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
-; RV32I-NEXT:    lui a0, 16
-; RV32I-NEXT:    addi s2, a0, -1
+; RV32I-NEXT:    lui s2, 16
+; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
 ; RV32I-NEXT:    j .LBB24_2
 ; RV32I-NEXT:  .LBB24_1: # %atomicrmw.start
@@ -2118,8 +2118,8 @@ define signext i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a2, s0
 ; RV32I-NEXT:    j .LBB24_1
 ; RV32I-NEXT:  .LBB24_4: # %atomicrmw.end
-; RV32I-NEXT:    slli a0, a1, 16
-; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai a0, a1, 16
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -2166,8 +2166,8 @@ define signext i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
-; RV64I-NEXT:    lui a0, 16
-; RV64I-NEXT:    addiw s2, a0, -1
+; RV64I-NEXT:    lui s2, 16
+; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
 ; RV64I-NEXT:    j .LBB24_2
 ; RV64I-NEXT:  .LBB24_1: # %atomicrmw.start
@@ -2190,8 +2190,8 @@ define signext i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a2, s0
 ; RV64I-NEXT:    j .LBB24_1
 ; RV64I-NEXT:  .LBB24_4: # %atomicrmw.end
-; RV64I-NEXT:    slli a0, a1, 48
-; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai a0, a1, 48
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload

diff  --git a/llvm/test/CodeGen/RISCV/bittest.ll b/llvm/test/CodeGen/RISCV/bittest.ll
index 9f6525283b9d1..73dc9f0e0a757 100644
--- a/llvm/test/CodeGen/RISCV/bittest.ll
+++ b/llvm/test/CodeGen/RISCV/bittest.ll
@@ -987,8 +987,8 @@ define i64 @bit_55_z_select_i64(i64 %a, i64 %b, i64 %c) {
 define i64 @bit_55_nz_select_i64(i64 %a, i64 %b, i64 %c) {
 ; RV32I-LABEL: bit_55_nz_select_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a0, a1, 8
-; RV32I-NEXT:    srli a6, a0, 31
+; RV32I-NEXT:    slli a1, a1, 8
+; RV32I-NEXT:    srli a6, a1, 31
 ; RV32I-NEXT:    mv a1, a3
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    bnez a6, .LBB34_2
@@ -1584,8 +1584,8 @@ define void @bit_32_nz_branch_i64(i64 %0) {
 define void @bit_62_z_branch_i64(i64 %0) {
 ; RV32-LABEL: bit_62_z_branch_i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    slli a0, a1, 1
-; RV32-NEXT:    bltz a0, .LBB55_2
+; RV32-NEXT:    slli a1, a1, 1
+; RV32-NEXT:    bltz a1, .LBB55_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    tail bar@plt
 ; RV32-NEXT:  .LBB55_2:
@@ -1614,8 +1614,8 @@ define void @bit_62_z_branch_i64(i64 %0) {
 define void @bit_62_nz_branch_i64(i64 %0) {
 ; RV32-LABEL: bit_62_nz_branch_i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    slli a0, a1, 1
-; RV32-NEXT:    bgez a0, .LBB56_2
+; RV32-NEXT:    slli a1, a1, 1
+; RV32-NEXT:    bgez a1, .LBB56_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    tail bar@plt
 ; RV32-NEXT:  .LBB56_2:

diff  --git a/llvm/test/CodeGen/RISCV/branch-relaxation.ll b/llvm/test/CodeGen/RISCV/branch-relaxation.ll
index d685d16cb57cc..285764f998098 100644
--- a/llvm/test/CodeGen/RISCV/branch-relaxation.ll
+++ b/llvm/test/CodeGen/RISCV/branch-relaxation.ll
@@ -342,11 +342,9 @@ define void @relax_jal_spill_32() {
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    li ra, 1
 ; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    sd ra, 16(sp) # 8-byte Folded Spill
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    li t0, 5
 ; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    sd t0, 8(sp) # 8-byte Folded Spill
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    li t1, 6
 ; CHECK-RV64-NEXT:    #NO_APP
@@ -422,24 +420,24 @@ define void @relax_jal_spill_32() {
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    li t5, 30
 ; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    sext.w t0, t5
+; CHECK-RV64-NEXT:    sd t5, 16(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sext.w t5, t5
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    li t6, 31
 ; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    sext.w ra, t6
-; CHECK-RV64-NEXT:    beq t0, ra, .LBB2_1
+; CHECK-RV64-NEXT:    sd t6, 8(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sext.w t6, t6
+; CHECK-RV64-NEXT:    beq t5, t6, .LBB2_1
 ; CHECK-RV64-NEXT:  # %bb.3:
-; CHECK-RV64-NEXT:    jump .LBB2_2, t0
+; CHECK-RV64-NEXT:    jump .LBB2_2, t5
 ; CHECK-RV64-NEXT:  .LBB2_1: # %branch_1
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    .zero 1048576
 ; CHECK-RV64-NEXT:    #NO_APP
 ; CHECK-RV64-NEXT:  .LBB2_2: # %branch_2
-; CHECK-RV64-NEXT:    ld ra, 16(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    # reg use ra
 ; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    ld t0, 8(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    # reg use t0
 ; CHECK-RV64-NEXT:    #NO_APP
@@ -515,9 +513,11 @@ define void @relax_jal_spill_32() {
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    # reg use t4
 ; CHECK-RV64-NEXT:    #NO_APP
+; CHECK-RV64-NEXT:    ld t5, 16(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    # reg use t5
 ; CHECK-RV64-NEXT:    #NO_APP
+; CHECK-RV64-NEXT:    ld t6, 8(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    # reg use t6
 ; CHECK-RV64-NEXT:    #NO_APP
@@ -885,17 +885,9 @@ define void @relax_jal_spill_32_adjust_spill_slot() {
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    li ra, 1
 ; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    lui a0, 1
-; CHECK-RV64-NEXT:    addiw a0, a0, -8
-; CHECK-RV64-NEXT:    add a0, sp, a0
-; CHECK-RV64-NEXT:    sd ra, 0(a0) # 8-byte Folded Spill
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    li t0, 5
 ; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    lui a0, 1
-; CHECK-RV64-NEXT:    addiw a0, a0, -16
-; CHECK-RV64-NEXT:    add a0, sp, a0
-; CHECK-RV64-NEXT:    sd t0, 0(a0) # 8-byte Folded Spill
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    li t1, 6
 ; CHECK-RV64-NEXT:    #NO_APP
@@ -971,30 +963,32 @@ define void @relax_jal_spill_32_adjust_spill_slot() {
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    li t5, 30
 ; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    sext.w t0, t5
+; CHECK-RV64-NEXT:    sd t0, 0(sp)
+; CHECK-RV64-NEXT:    lui t0, 1
+; CHECK-RV64-NEXT:    addiw t0, t0, -8
+; CHECK-RV64-NEXT:    add t0, sp, t0
+; CHECK-RV64-NEXT:    sd t5, 0(t0) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sext.w t5, t5
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    li t6, 31
 ; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    sext.w ra, t6
-; CHECK-RV64-NEXT:    beq t0, ra, .LBB3_1
+; CHECK-RV64-NEXT:    lui t0, 1
+; CHECK-RV64-NEXT:    addiw t0, t0, -16
+; CHECK-RV64-NEXT:    add t0, sp, t0
+; CHECK-RV64-NEXT:    sd t6, 0(t0) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    ld t0, 0(sp)
+; CHECK-RV64-NEXT:    sext.w t6, t6
+; CHECK-RV64-NEXT:    beq t5, t6, .LBB3_1
 ; CHECK-RV64-NEXT:  # %bb.3:
-; CHECK-RV64-NEXT:    jump .LBB3_2, t0
+; CHECK-RV64-NEXT:    jump .LBB3_2, t5
 ; CHECK-RV64-NEXT:  .LBB3_1: # %branch_1
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    .zero 1048576
 ; CHECK-RV64-NEXT:    #NO_APP
 ; CHECK-RV64-NEXT:  .LBB3_2: # %branch_2
-; CHECK-RV64-NEXT:    lui t0, 1
-; CHECK-RV64-NEXT:    addiw t0, t0, -8
-; CHECK-RV64-NEXT:    add t0, sp, t0
-; CHECK-RV64-NEXT:    ld ra, 0(t0) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    # reg use ra
 ; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    lui t0, 1
-; CHECK-RV64-NEXT:    addiw t0, t0, -16
-; CHECK-RV64-NEXT:    add t0, sp, t0
-; CHECK-RV64-NEXT:    ld t0, 0(t0) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    # reg use t0
 ; CHECK-RV64-NEXT:    #NO_APP
@@ -1070,9 +1064,17 @@ define void @relax_jal_spill_32_adjust_spill_slot() {
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    # reg use t4
 ; CHECK-RV64-NEXT:    #NO_APP
+; CHECK-RV64-NEXT:    lui a0, 1
+; CHECK-RV64-NEXT:    addiw a0, a0, -8
+; CHECK-RV64-NEXT:    add a0, sp, a0
+; CHECK-RV64-NEXT:    ld t5, 0(a0) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    # reg use t5
 ; CHECK-RV64-NEXT:    #NO_APP
+; CHECK-RV64-NEXT:    lui a0, 1
+; CHECK-RV64-NEXT:    addiw a0, a0, -16
+; CHECK-RV64-NEXT:    add a0, sp, a0
+; CHECK-RV64-NEXT:    ld t6, 0(a0) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    # reg use t6
 ; CHECK-RV64-NEXT:    #NO_APP

diff  --git a/llvm/test/CodeGen/RISCV/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
index d14877a8aa959..a684a14616ee5 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
@@ -19,8 +19,8 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    slli a0, a1, 16
-; RV32I-NEXT:    srli a0, a0, 16
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srli a0, a1, 16
 ; RV32I-NEXT:    call __extendhfsf2@plt
 ; RV32I-NEXT:    call __fixsfsi@plt
 ; RV32I-NEXT:    add a0, s0, a0
@@ -35,8 +35,8 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    slli a0, a1, 48
-; RV64I-NEXT:    srli a0, a0, 48
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srli a0, a1, 48
 ; RV64I-NEXT:    call __extendhfsf2@plt
 ; RV64I-NEXT:    call __fixsfdi@plt
 ; RV64I-NEXT:    addw a0, s0, a0

diff  --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll
index f972172bd4952..f424e77182a91 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll
@@ -33,8 +33,8 @@ define i64 @caller_double_in_regs() nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    li a0, 1
-; RV64I-NEXT:    slli a1, a0, 62
+; RV64I-NEXT:    li a1, 1
+; RV64I-NEXT:    slli a1, a1, 62
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    call callee_double_in_regs@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload

diff  --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index 469203761614d..2eb7970e6eb29 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -110,10 +110,9 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
 ; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    lui a0, 269824
-; RV32I-NEXT:    addi a3, a0, -1
+; RV32I-NEXT:    lui a3, 269824
+; RV32I-NEXT:    addi a3, a3, -1
 ; RV32I-NEXT:    lui a2, 1047552
-; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __gtdf2@plt
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    lui a3, 794112
@@ -161,9 +160,8 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
 ; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    li a0, -497
-; RV64I-NEXT:    slli a1, a0, 53
-; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    li a1, -497
+; RV64I-NEXT:    slli a1, a1, 53
 ; RV64I-NEXT:    call __gedf2@plt
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
@@ -306,10 +304,9 @@ define i32 @fcvt_wu_d_sat(double %a) nounwind {
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    lui a0, 270080
-; RV32I-NEXT:    addi a3, a0, -1
+; RV32I-NEXT:    lui a3, 270080
+; RV32I-NEXT:    addi a3, a3, -1
 ; RV32I-NEXT:    lui a2, 1048064
-; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __gtdf2@plt
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    neg s2, a0
@@ -596,10 +593,9 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV32I-NEXT:    sw s6, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    lui a0, 278016
-; RV32I-NEXT:    addi s2, a0, -1
+; RV32I-NEXT:    lui s2, 278016
+; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    li a2, -1
-; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a3, s2
 ; RV32I-NEXT:    call __gtdf2@plt
 ; RV32I-NEXT:    mv s4, a0
@@ -676,9 +672,8 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    li a0, -481
-; RV64I-NEXT:    slli a1, a0, 53
-; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    li a1, -481
+; RV64I-NEXT:    slli a1, a1, 53
 ; RV64I-NEXT:    call __gedf2@plt
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
@@ -800,10 +795,9 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
 ; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    lui a0, 278272
-; RV32I-NEXT:    addi s2, a0, -1
+; RV32I-NEXT:    lui s2, 278272
+; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    li a2, -1
-; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a3, s2
 ; RV32I-NEXT:    call __gtdf2@plt
 ; RV32I-NEXT:    sgtz a0, a0
@@ -1357,8 +1351,8 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
 ; RV32I-NEXT:  .LBB26_2: # %start
 ; RV32I-NEXT:    blez s2, .LBB26_4
 ; RV32I-NEXT:  # %bb.3: # %start
-; RV32I-NEXT:    lui a0, 8
-; RV32I-NEXT:    addi s3, a0, -1
+; RV32I-NEXT:    lui s3, 8
+; RV32I-NEXT:    addi s3, s3, -1
 ; RV32I-NEXT:  .LBB26_4: # %start
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
@@ -1387,9 +1381,8 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    li a0, -505
-; RV64I-NEXT:    slli a1, a0, 53
-; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    li a1, -505
+; RV64I-NEXT:    slli a1, a1, 53
 ; RV64I-NEXT:    call __gedf2@plt
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
@@ -1406,8 +1399,8 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
 ; RV64I-NEXT:    call __gtdf2@plt
 ; RV64I-NEXT:    blez a0, .LBB26_4
 ; RV64I-NEXT:  # %bb.3: # %start
-; RV64I-NEXT:    lui a0, 8
-; RV64I-NEXT:    addiw s1, a0, -1
+; RV64I-NEXT:    lui s1, 8
+; RV64I-NEXT:    addiw s1, s1, -1
 ; RV64I-NEXT:  .LBB26_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
@@ -1492,9 +1485,8 @@ define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    mv s2, a0
-; RV32I-NEXT:    lui a0, 265984
-; RV32I-NEXT:    addi a3, a0, -32
-; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    lui a3, 265984
+; RV32I-NEXT:    addi a3, a3, -32
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __gtdf2@plt
 ; RV32I-NEXT:    mv s3, a0
@@ -1693,9 +1685,8 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    li a0, -509
-; RV64I-NEXT:    slli a1, a0, 53
-; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    li a1, -509
+; RV64I-NEXT:    slli a1, a1, 53
 ; RV64I-NEXT:    call __gedf2@plt
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
@@ -1705,8 +1696,8 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    li s1, -128
 ; RV64I-NEXT:  .LBB30_2: # %start
-; RV64I-NEXT:    lui a0, 65919
-; RV64I-NEXT:    slli a1, a0, 34
+; RV64I-NEXT:    lui a1, 65919
+; RV64I-NEXT:    slli a1, a1, 34
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __gtdf2@plt
 ; RV64I-NEXT:    blez a0, .LBB30_4
@@ -1845,8 +1836,8 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __fixunsdfdi@plt
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    lui a0, 131967
-; RV64I-NEXT:    slli a1, a0, 33
+; RV64I-NEXT:    lui a1, 131967
+; RV64I-NEXT:    slli a1, a1, 33
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __gtdf2@plt
 ; RV64I-NEXT:    blez a0, .LBB32_2
@@ -1902,10 +1893,9 @@ define zeroext i32 @fcvt_wu_d_sat_zext(double %a) nounwind {
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    lui a0, 270080
-; RV32I-NEXT:    addi a3, a0, -1
+; RV32I-NEXT:    lui a3, 270080
+; RV32I-NEXT:    addi a3, a3, -1
 ; RV32I-NEXT:    lui a2, 1048064
-; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __gtdf2@plt
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    neg s2, a0
@@ -1993,10 +1983,9 @@ define signext i32 @fcvt_w_d_sat_sext(double %a) nounwind {
 ; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    lui a0, 269824
-; RV32I-NEXT:    addi a3, a0, -1
+; RV32I-NEXT:    lui a3, 269824
+; RV32I-NEXT:    addi a3, a3, -1
 ; RV32I-NEXT:    lui a2, 1047552
-; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __gtdf2@plt
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    lui a3, 794112
@@ -2044,9 +2033,8 @@ define signext i32 @fcvt_w_d_sat_sext(double %a) nounwind {
 ; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    li a0, -497
-; RV64I-NEXT:    slli a1, a0, 53
-; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    li a1, -497
+; RV64I-NEXT:    slli a1, a1, 53
 ; RV64I-NEXT:    call __gedf2@plt
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0

diff  --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll
index 9fb3de2f0d002..a6026a6b70795 100644
--- a/llvm/test/CodeGen/RISCV/float-arith.ll
+++ b/llvm/test/CodeGen/RISCV/float-arith.ll
@@ -238,8 +238,8 @@ define float @fsgnjn_s(float %a, float %b) nounwind {
 ; RV32I-NEXT:    not a0, a0
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    slli a1, s0, 1
-; RV32I-NEXT:    srli a1, a1, 1
+; RV32I-NEXT:    slli s0, s0, 1
+; RV32I-NEXT:    srli a1, s0, 1
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -256,8 +256,8 @@ define float @fsgnjn_s(float %a, float %b) nounwind {
 ; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    slli a1, s0, 33
-; RV64I-NEXT:    srli a1, a1, 33
+; RV64I-NEXT:    slli s0, s0, 33
+; RV64I-NEXT:    srli a1, s0, 33
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload

diff  --git a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
index c50af98adb9ab..72e0d578be1f5 100644
--- a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
@@ -73,8 +73,8 @@ define double @bitcast_double_and(double %a1, double %a2) nounwind {
 ; RV32F-NEXT:    mv s1, a0
 ; RV32F-NEXT:    call __adddf3@plt
 ; RV32F-NEXT:    mv a2, a0
-; RV32F-NEXT:    slli a0, a1, 1
-; RV32F-NEXT:    srli a3, a0, 1
+; RV32F-NEXT:    slli a1, a1, 1
+; RV32F-NEXT:    srli a3, a1, 1
 ; RV32F-NEXT:    mv a0, s1
 ; RV32F-NEXT:    mv a1, s0
 ; RV32F-NEXT:    call __adddf3@plt

diff  --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index bdb2161ff7a84..0b72c8716ff19 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -65,8 +65,8 @@ define i32 @fcvt_w_s_sat(float %a) nounwind {
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    lui s1, 524288
 ; RV32I-NEXT:  .LBB1_2: # %start
-; RV32I-NEXT:    lui a0, 323584
-; RV32I-NEXT:    addi a1, a0, -1
+; RV32I-NEXT:    lui a1, 323584
+; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __gtsf2@plt
 ; RV32I-NEXT:    blez a0, .LBB1_4
@@ -107,8 +107,8 @@ define i32 @fcvt_w_s_sat(float %a) nounwind {
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    lui s1, 524288
 ; RV64I-NEXT:  .LBB1_2: # %start
-; RV64I-NEXT:    lui a0, 323584
-; RV64I-NEXT:    addiw a1, a0, -1
+; RV64I-NEXT:    lui a1, 323584
+; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __gtsf2@plt
 ; RV64I-NEXT:    blez a0, .LBB1_4
@@ -239,8 +239,8 @@ define i32 @fcvt_wu_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __fixunssfsi@plt
 ; RV32I-NEXT:    and s1, s1, a0
-; RV32I-NEXT:    lui a0, 325632
-; RV32I-NEXT:    addi a1, a0, -1
+; RV32I-NEXT:    lui a1, 325632
+; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __gtsf2@plt
 ; RV32I-NEXT:    sgtz a0, a0
@@ -266,8 +266,8 @@ define i32 @fcvt_wu_s_sat(float %a) nounwind {
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __fixunssfdi@plt
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    lui a0, 325632
-; RV64I-NEXT:    addiw a1, a0, -1
+; RV64I-NEXT:    lui a1, 325632
+; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __gtsf2@plt
 ; RV64I-NEXT:    blez a0, .LBB4_2
@@ -584,8 +584,8 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    lui s3, 524288
 ; RV32I-NEXT:  .LBB12_2: # %start
-; RV32I-NEXT:    lui a0, 389120
-; RV32I-NEXT:    addi s2, a0, -1
+; RV32I-NEXT:    lui s2, 389120
+; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    call __gtsf2@plt
@@ -647,8 +647,8 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    slli s1, s3, 63
 ; RV64I-NEXT:  .LBB12_2: # %start
-; RV64I-NEXT:    lui a0, 389120
-; RV64I-NEXT:    addiw a1, a0, -1
+; RV64I-NEXT:    lui a1, 389120
+; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __gtsf2@plt
 ; RV64I-NEXT:    blez a0, .LBB12_4
@@ -762,8 +762,8 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    call __fixunssfdi@plt
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    and s3, s2, a0
-; RV32I-NEXT:    lui a0, 391168
-; RV32I-NEXT:    addi s2, a0, -1
+; RV32I-NEXT:    lui s2, 391168
+; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    call __gtsf2@plt
@@ -805,8 +805,8 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixunssfdi@plt
 ; RV64I-NEXT:    and s1, s1, a0
-; RV64I-NEXT:    lui a0, 391168
-; RV64I-NEXT:    addiw a1, a0, -1
+; RV64I-NEXT:    lui a1, 391168
+; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __gtsf2@plt
 ; RV64I-NEXT:    sgtz a0, a0
@@ -1204,8 +1204,8 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
 ; RV32I-NEXT:    call __gtsf2@plt
 ; RV32I-NEXT:    blez a0, .LBB24_4
 ; RV32I-NEXT:  # %bb.3: # %start
-; RV32I-NEXT:    lui a0, 8
-; RV32I-NEXT:    addi s1, a0, -1
+; RV32I-NEXT:    lui s1, 8
+; RV32I-NEXT:    addi s1, s1, -1
 ; RV32I-NEXT:  .LBB24_4: # %start
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s0
@@ -1246,8 +1246,8 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
 ; RV64I-NEXT:    call __gtsf2@plt
 ; RV64I-NEXT:    blez a0, .LBB24_4
 ; RV64I-NEXT:  # %bb.3: # %start
-; RV64I-NEXT:    lui a0, 8
-; RV64I-NEXT:    addiw s1, a0, -1
+; RV64I-NEXT:    lui s1, 8
+; RV64I-NEXT:    addiw s1, s1, -1
 ; RV64I-NEXT:  .LBB24_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
@@ -1710,8 +1710,8 @@ define zeroext i32 @fcvt_wu_s_sat_zext(float %a) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __fixunssfsi@plt
 ; RV32I-NEXT:    and s1, s1, a0
-; RV32I-NEXT:    lui a0, 325632
-; RV32I-NEXT:    addi a1, a0, -1
+; RV32I-NEXT:    lui a1, 325632
+; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __gtsf2@plt
 ; RV32I-NEXT:    sgtz a0, a0
@@ -1737,8 +1737,8 @@ define zeroext i32 @fcvt_wu_s_sat_zext(float %a) nounwind {
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __fixunssfdi@plt
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    lui a0, 325632
-; RV64I-NEXT:    addiw a1, a0, -1
+; RV64I-NEXT:    lui a1, 325632
+; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __gtsf2@plt
 ; RV64I-NEXT:    blez a0, .LBB31_2
@@ -1794,8 +1794,8 @@ define signext i32 @fcvt_w_s_sat_sext(float %a) nounwind {
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    lui s1, 524288
 ; RV32I-NEXT:  .LBB32_2: # %start
-; RV32I-NEXT:    lui a0, 323584
-; RV32I-NEXT:    addi a1, a0, -1
+; RV32I-NEXT:    lui a1, 323584
+; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __gtsf2@plt
 ; RV32I-NEXT:    blez a0, .LBB32_4
@@ -1836,8 +1836,8 @@ define signext i32 @fcvt_w_s_sat_sext(float %a) nounwind {
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    lui s1, 524288
 ; RV64I-NEXT:  .LBB32_2: # %start
-; RV64I-NEXT:    lui a0, 323584
-; RV64I-NEXT:    addiw a1, a0, -1
+; RV64I-NEXT:    lui a1, 323584
+; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __gtsf2@plt
 ; RV64I-NEXT:    blez a0, .LBB32_4

diff  --git a/llvm/test/CodeGen/RISCV/fold-vector-cmp.ll b/llvm/test/CodeGen/RISCV/fold-vector-cmp.ll
index 0512ff4c4cdc0..eaabd23a7a582 100644
--- a/llvm/test/CodeGen/RISCV/fold-vector-cmp.ll
+++ b/llvm/test/CodeGen/RISCV/fold-vector-cmp.ll
@@ -17,9 +17,9 @@ define i32 @test(i32 %call.i) {
 ; CHECK-V-NEXT:    vmv.v.x v8, a1
 ; CHECK-V-NEXT:    vsetvli zero, zero, e32, mf2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addiw a0, a1, 2
+; CHECK-V-NEXT:    addiw a1, a1, 2
 ; CHECK-V-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-V-NEXT:    vmslt.vx v0, v8, a0
+; CHECK-V-NEXT:    vmslt.vx v0, v8, a1
 ; CHECK-V-NEXT:    vmv.v.i v8, 0
 ; CHECK-V-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma

diff  --git a/llvm/test/CodeGen/RISCV/forced-atomics.ll b/llvm/test/CodeGen/RISCV/forced-atomics.ll
index edca10087e13f..6eda04bfa0c64 100644
--- a/llvm/test/CodeGen/RISCV/forced-atomics.ll
+++ b/llvm/test/CodeGen/RISCV/forced-atomics.ll
@@ -2957,8 +2957,8 @@ define double @rmw64_fadd_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    mv s0, a0
 ; RV64-NO-ATOMIC-NEXT:    ld s2, 0(a0)
-; RV64-NO-ATOMIC-NEXT:    li a0, 1023
-; RV64-NO-ATOMIC-NEXT:    slli s1, a0, 52
+; RV64-NO-ATOMIC-NEXT:    li s1, 1023
+; RV64-NO-ATOMIC-NEXT:    slli s1, s1, 52
 ; RV64-NO-ATOMIC-NEXT:  .LBB54_1: # %atomicrmw.start
 ; RV64-NO-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s2
@@ -2991,8 +2991,8 @@ define double @rmw64_fadd_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    mv s0, a0
 ; RV64-ATOMIC-NEXT:    ld a0, 0(a0)
-; RV64-ATOMIC-NEXT:    li a1, 1023
-; RV64-ATOMIC-NEXT:    slli s1, a1, 52
+; RV64-ATOMIC-NEXT:    li s1, 1023
+; RV64-ATOMIC-NEXT:    slli s1, s1, 52
 ; RV64-ATOMIC-NEXT:  .LBB54_1: # %atomicrmw.start
 ; RV64-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-NEXT:    mv s2, a0
@@ -3063,8 +3063,8 @@ define double @rmw64_fsub_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    mv s0, a0
 ; RV64-NO-ATOMIC-NEXT:    ld s2, 0(a0)
-; RV64-NO-ATOMIC-NEXT:    li a0, -1025
-; RV64-NO-ATOMIC-NEXT:    slli s1, a0, 52
+; RV64-NO-ATOMIC-NEXT:    li s1, -1025
+; RV64-NO-ATOMIC-NEXT:    slli s1, s1, 52
 ; RV64-NO-ATOMIC-NEXT:  .LBB55_1: # %atomicrmw.start
 ; RV64-NO-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s2
@@ -3097,8 +3097,8 @@ define double @rmw64_fsub_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    mv s0, a0
 ; RV64-ATOMIC-NEXT:    ld a0, 0(a0)
-; RV64-ATOMIC-NEXT:    li a1, -1025
-; RV64-ATOMIC-NEXT:    slli s1, a1, 52
+; RV64-ATOMIC-NEXT:    li s1, -1025
+; RV64-ATOMIC-NEXT:    slli s1, s1, 52
 ; RV64-ATOMIC-NEXT:  .LBB55_1: # %atomicrmw.start
 ; RV64-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-NEXT:    mv s2, a0
@@ -3169,8 +3169,8 @@ define double @rmw64_fmin_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    mv s0, a0
 ; RV64-NO-ATOMIC-NEXT:    ld s2, 0(a0)
-; RV64-NO-ATOMIC-NEXT:    li a0, 1023
-; RV64-NO-ATOMIC-NEXT:    slli s1, a0, 52
+; RV64-NO-ATOMIC-NEXT:    li s1, 1023
+; RV64-NO-ATOMIC-NEXT:    slli s1, s1, 52
 ; RV64-NO-ATOMIC-NEXT:  .LBB56_1: # %atomicrmw.start
 ; RV64-NO-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s2
@@ -3203,8 +3203,8 @@ define double @rmw64_fmin_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    mv s0, a0
 ; RV64-ATOMIC-NEXT:    ld a0, 0(a0)
-; RV64-ATOMIC-NEXT:    li a1, 1023
-; RV64-ATOMIC-NEXT:    slli s1, a1, 52
+; RV64-ATOMIC-NEXT:    li s1, 1023
+; RV64-ATOMIC-NEXT:    slli s1, s1, 52
 ; RV64-ATOMIC-NEXT:  .LBB56_1: # %atomicrmw.start
 ; RV64-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-NEXT:    mv s2, a0
@@ -3275,8 +3275,8 @@ define double @rmw64_fmax_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    mv s0, a0
 ; RV64-NO-ATOMIC-NEXT:    ld s2, 0(a0)
-; RV64-NO-ATOMIC-NEXT:    li a0, 1023
-; RV64-NO-ATOMIC-NEXT:    slli s1, a0, 52
+; RV64-NO-ATOMIC-NEXT:    li s1, 1023
+; RV64-NO-ATOMIC-NEXT:    slli s1, s1, 52
 ; RV64-NO-ATOMIC-NEXT:  .LBB57_1: # %atomicrmw.start
 ; RV64-NO-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s2
@@ -3309,8 +3309,8 @@ define double @rmw64_fmax_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    mv s0, a0
 ; RV64-ATOMIC-NEXT:    ld a0, 0(a0)
-; RV64-ATOMIC-NEXT:    li a1, 1023
-; RV64-ATOMIC-NEXT:    slli s1, a1, 52
+; RV64-ATOMIC-NEXT:    li s1, 1023
+; RV64-ATOMIC-NEXT:    slli s1, s1, 52
 ; RV64-ATOMIC-NEXT:  .LBB57_1: # %atomicrmw.start
 ; RV64-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-NEXT:    mv s2, a0

diff  --git a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
index fbab3376941da..5e9d031f1b009 100644
--- a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
@@ -831,8 +831,8 @@ define <2 x i16> @stest_f64i16(<2 x double> %x) {
 ; CHECK-NOV-LABEL: stest_f64i16:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.w.d a1, fa1, rtz
-; CHECK-NOV-NEXT:    lui a0, 8
-; CHECK-NOV-NEXT:    addiw a2, a0, -1
+; CHECK-NOV-NEXT:    lui a2, 8
+; CHECK-NOV-NEXT:    addiw a2, a2, -1
 ; CHECK-NOV-NEXT:    fcvt.w.d a0, fa0, rtz
 ; CHECK-NOV-NEXT:    bge a1, a2, .LBB9_5
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -884,8 +884,8 @@ define <2 x i16> @utest_f64i16(<2 x double> %x) {
 ; CHECK-NOV-LABEL: utest_f64i16:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.wu.d a0, fa0, rtz
-; CHECK-NOV-NEXT:    lui a1, 16
-; CHECK-NOV-NEXT:    addiw a2, a1, -1
+; CHECK-NOV-NEXT:    lui a2, 16
+; CHECK-NOV-NEXT:    addiw a2, a2, -1
 ; CHECK-NOV-NEXT:    fcvt.wu.d a1, fa1, rtz
 ; CHECK-NOV-NEXT:    bgeu a0, a2, .LBB10_3
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -921,8 +921,8 @@ define <2 x i16> @ustest_f64i16(<2 x double> %x) {
 ; CHECK-NOV-LABEL: ustest_f64i16:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.w.d a0, fa0, rtz
-; CHECK-NOV-NEXT:    lui a1, 16
-; CHECK-NOV-NEXT:    addiw a2, a1, -1
+; CHECK-NOV-NEXT:    lui a2, 16
+; CHECK-NOV-NEXT:    addiw a2, a2, -1
 ; CHECK-NOV-NEXT:    fcvt.w.d a1, fa1, rtz
 ; CHECK-NOV-NEXT:    blt a0, a2, .LBB11_2
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -965,8 +965,8 @@ define <4 x i16> @stest_f32i16(<4 x float> %x) {
 ; CHECK-NOV-LABEL: stest_f32i16:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.w.s a1, fa3, rtz
-; CHECK-NOV-NEXT:    lui a2, 8
-; CHECK-NOV-NEXT:    addiw a5, a2, -1
+; CHECK-NOV-NEXT:    lui a5, 8
+; CHECK-NOV-NEXT:    addiw a5, a5, -1
 ; CHECK-NOV-NEXT:    fcvt.w.s a2, fa2, rtz
 ; CHECK-NOV-NEXT:    bge a1, a5, .LBB12_10
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -1046,8 +1046,8 @@ define <4 x i16> @utest_f32i16(<4 x float> %x) {
 ; CHECK-NOV-LABEL: utest_f32i16:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.wu.s a1, fa0, rtz
-; CHECK-NOV-NEXT:    lui a2, 16
-; CHECK-NOV-NEXT:    addiw a3, a2, -1
+; CHECK-NOV-NEXT:    lui a3, 16
+; CHECK-NOV-NEXT:    addiw a3, a3, -1
 ; CHECK-NOV-NEXT:    fcvt.wu.s a2, fa1, rtz
 ; CHECK-NOV-NEXT:    bgeu a1, a3, .LBB13_6
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -1101,8 +1101,8 @@ define <4 x i16> @ustest_f32i16(<4 x float> %x) {
 ; CHECK-NOV-LABEL: ustest_f32i16:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.w.s a1, fa0, rtz
-; CHECK-NOV-NEXT:    lui a2, 16
-; CHECK-NOV-NEXT:    addiw a4, a2, -1
+; CHECK-NOV-NEXT:    lui a4, 16
+; CHECK-NOV-NEXT:    addiw a4, a4, -1
 ; CHECK-NOV-NEXT:    fcvt.w.s a2, fa1, rtz
 ; CHECK-NOV-NEXT:    bge a1, a4, .LBB14_6
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -1238,8 +1238,8 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    mv a0, s1
 ; CHECK-NOV-NEXT:    call __extendhfsf2 at plt
 ; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-NOV-NEXT:    lui a1, 8
-; CHECK-NOV-NEXT:    addiw a7, a1, -1
+; CHECK-NOV-NEXT:    lui a7, 8
+; CHECK-NOV-NEXT:    addiw a7, a7, -1
 ; CHECK-NOV-NEXT:    bge a0, a7, .LBB15_18
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
 ; CHECK-NOV-NEXT:    fcvt.l.s a1, fs5, rtz
@@ -1828,8 +1828,8 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    mv a0, s1
 ; CHECK-NOV-NEXT:    call __extendhfsf2 at plt
 ; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-NOV-NEXT:    lui a1, 16
-; CHECK-NOV-NEXT:    addiw a3, a1, -1
+; CHECK-NOV-NEXT:    lui a3, 16
+; CHECK-NOV-NEXT:    addiw a3, a3, -1
 ; CHECK-NOV-NEXT:    bge a0, a3, .LBB17_10
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
 ; CHECK-NOV-NEXT:    fcvt.l.s a1, fs5, rtz
@@ -4113,8 +4113,8 @@ define <2 x i16> @stest_f64i16_mm(<2 x double> %x) {
 ; CHECK-NOV-LABEL: stest_f64i16_mm:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.w.d a1, fa1, rtz
-; CHECK-NOV-NEXT:    lui a0, 8
-; CHECK-NOV-NEXT:    addiw a2, a0, -1
+; CHECK-NOV-NEXT:    lui a2, 8
+; CHECK-NOV-NEXT:    addiw a2, a2, -1
 ; CHECK-NOV-NEXT:    fcvt.w.d a0, fa0, rtz
 ; CHECK-NOV-NEXT:    bge a1, a2, .LBB36_5
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -4164,8 +4164,8 @@ define <2 x i16> @utest_f64i16_mm(<2 x double> %x) {
 ; CHECK-NOV-LABEL: utest_f64i16_mm:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.wu.d a0, fa0, rtz
-; CHECK-NOV-NEXT:    lui a1, 16
-; CHECK-NOV-NEXT:    addiw a2, a1, -1
+; CHECK-NOV-NEXT:    lui a2, 16
+; CHECK-NOV-NEXT:    addiw a2, a2, -1
 ; CHECK-NOV-NEXT:    fcvt.wu.d a1, fa1, rtz
 ; CHECK-NOV-NEXT:    bgeu a0, a2, .LBB37_3
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -4200,8 +4200,8 @@ define <2 x i16> @ustest_f64i16_mm(<2 x double> %x) {
 ; CHECK-NOV-LABEL: ustest_f64i16_mm:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.w.d a1, fa1, rtz
-; CHECK-NOV-NEXT:    lui a0, 16
-; CHECK-NOV-NEXT:    addiw a2, a0, -1
+; CHECK-NOV-NEXT:    lui a2, 16
+; CHECK-NOV-NEXT:    addiw a2, a2, -1
 ; CHECK-NOV-NEXT:    fcvt.w.d a0, fa0, rtz
 ; CHECK-NOV-NEXT:    blt a1, a2, .LBB38_2
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -4242,8 +4242,8 @@ define <4 x i16> @stest_f32i16_mm(<4 x float> %x) {
 ; CHECK-NOV-LABEL: stest_f32i16_mm:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.w.s a1, fa3, rtz
-; CHECK-NOV-NEXT:    lui a2, 8
-; CHECK-NOV-NEXT:    addiw a5, a2, -1
+; CHECK-NOV-NEXT:    lui a5, 8
+; CHECK-NOV-NEXT:    addiw a5, a5, -1
 ; CHECK-NOV-NEXT:    fcvt.w.s a2, fa2, rtz
 ; CHECK-NOV-NEXT:    bge a1, a5, .LBB39_10
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -4321,8 +4321,8 @@ define <4 x i16> @utest_f32i16_mm(<4 x float> %x) {
 ; CHECK-NOV-LABEL: utest_f32i16_mm:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.wu.s a1, fa0, rtz
-; CHECK-NOV-NEXT:    lui a2, 16
-; CHECK-NOV-NEXT:    addiw a3, a2, -1
+; CHECK-NOV-NEXT:    lui a3, 16
+; CHECK-NOV-NEXT:    addiw a3, a3, -1
 ; CHECK-NOV-NEXT:    fcvt.wu.s a2, fa1, rtz
 ; CHECK-NOV-NEXT:    bgeu a1, a3, .LBB40_6
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -4375,8 +4375,8 @@ define <4 x i16> @ustest_f32i16_mm(<4 x float> %x) {
 ; CHECK-NOV-LABEL: ustest_f32i16_mm:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.w.s a1, fa3, rtz
-; CHECK-NOV-NEXT:    lui a2, 16
-; CHECK-NOV-NEXT:    addiw a4, a2, -1
+; CHECK-NOV-NEXT:    lui a4, 16
+; CHECK-NOV-NEXT:    addiw a4, a4, -1
 ; CHECK-NOV-NEXT:    fcvt.w.s a2, fa2, rtz
 ; CHECK-NOV-NEXT:    bge a1, a4, .LBB41_6
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -4510,8 +4510,8 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    mv a0, s1
 ; CHECK-NOV-NEXT:    call __extendhfsf2 at plt
 ; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-NOV-NEXT:    lui a1, 8
-; CHECK-NOV-NEXT:    addiw a7, a1, -1
+; CHECK-NOV-NEXT:    lui a7, 8
+; CHECK-NOV-NEXT:    addiw a7, a7, -1
 ; CHECK-NOV-NEXT:    bge a0, a7, .LBB42_18
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
 ; CHECK-NOV-NEXT:    fcvt.l.s a1, fs5, rtz
@@ -4823,8 +4823,8 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    call __extendhfsf2 at plt
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa0
 ; CHECK-NOV-NEXT:    fcvt.lu.s s3, fs6, rtz
-; CHECK-NOV-NEXT:    fcvt.lu.s a0, fs5, rtz
-; CHECK-NOV-NEXT:    sext.w s2, a0
+; CHECK-NOV-NEXT:    fcvt.lu.s s2, fs5, rtz
+; CHECK-NOV-NEXT:    sext.w s2, s2
 ; CHECK-NOV-NEXT:    mv a0, s1
 ; CHECK-NOV-NEXT:    call __extendhfsf2 at plt
 ; CHECK-NOV-NEXT:    fcvt.lu.s a0, fa0, rtz
@@ -5095,8 +5095,8 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    mv a0, s1
 ; CHECK-NOV-NEXT:    call __extendhfsf2 at plt
 ; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-NOV-NEXT:    lui a1, 16
-; CHECK-NOV-NEXT:    addiw a3, a1, -1
+; CHECK-NOV-NEXT:    lui a3, 16
+; CHECK-NOV-NEXT:    addiw a3, a3, -1
 ; CHECK-NOV-NEXT:    bge a0, a3, .LBB44_10
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
 ; CHECK-NOV-NEXT:    fcvt.l.s a1, fs5, rtz
@@ -5587,8 +5587,8 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    snez a1, s1
 ; CHECK-NOV-NEXT:    addi a1, a1, -1
 ; CHECK-NOV-NEXT:    and a1, a1, s0
-; CHECK-NOV-NEXT:    addi a2, s1, -1
-; CHECK-NOV-NEXT:    seqz a2, a2
+; CHECK-NOV-NEXT:    addi s1, s1, -1
+; CHECK-NOV-NEXT:    seqz a2, s1
 ; CHECK-NOV-NEXT:    addi a2, a2, -1
 ; CHECK-NOV-NEXT:    and a1, a2, a1
 ; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -5627,8 +5627,8 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
 ; CHECK-V-NEXT:    snez a2, s1
 ; CHECK-V-NEXT:    addi a2, a2, -1
 ; CHECK-V-NEXT:    and a2, a2, s0
-; CHECK-V-NEXT:    addi a3, s1, -1
-; CHECK-V-NEXT:    seqz a3, a3
+; CHECK-V-NEXT:    addi s1, s1, -1
+; CHECK-V-NEXT:    seqz a3, s1
 ; CHECK-V-NEXT:    addi a3, a3, -1
 ; CHECK-V-NEXT:    and a2, a3, a2
 ; CHECK-V-NEXT:    snez a3, a1
@@ -5694,14 +5694,14 @@ define <2 x i64> @ustest_f64i64_mm(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    slti a3, a1, 1
 ; CHECK-NOV-NEXT:    neg a3, a3
 ; CHECK-NOV-NEXT:    and a3, a3, a0
-; CHECK-NOV-NEXT:    addi a0, a1, -1
-; CHECK-NOV-NEXT:    seqz a0, a0
-; CHECK-NOV-NEXT:    addi a1, a0, -1
+; CHECK-NOV-NEXT:    addi a1, a1, -1
+; CHECK-NOV-NEXT:    seqz a1, a1
+; CHECK-NOV-NEXT:    addi a1, a1, -1
 ; CHECK-NOV-NEXT:    slti a0, s1, 1
 ; CHECK-NOV-NEXT:    neg a0, a0
 ; CHECK-NOV-NEXT:    and a0, a0, s0
-; CHECK-NOV-NEXT:    addi a5, s1, -1
-; CHECK-NOV-NEXT:    seqz a5, a5
+; CHECK-NOV-NEXT:    addi s1, s1, -1
+; CHECK-NOV-NEXT:    seqz a5, s1
 ; CHECK-NOV-NEXT:    addi a5, a5, -1
 ; CHECK-NOV-NEXT:    and a0, a5, a0
 ; CHECK-NOV-NEXT:    beqz a4, .LBB47_6
@@ -6074,8 +6074,8 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    snez a1, s1
 ; CHECK-NOV-NEXT:    addi a1, a1, -1
 ; CHECK-NOV-NEXT:    and a1, a1, s0
-; CHECK-NOV-NEXT:    addi a2, s1, -1
-; CHECK-NOV-NEXT:    seqz a2, a2
+; CHECK-NOV-NEXT:    addi s1, s1, -1
+; CHECK-NOV-NEXT:    seqz a2, s1
 ; CHECK-NOV-NEXT:    addi a2, a2, -1
 ; CHECK-NOV-NEXT:    and a1, a2, a1
 ; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -6114,8 +6114,8 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
 ; CHECK-V-NEXT:    snez a2, s1
 ; CHECK-V-NEXT:    addi a2, a2, -1
 ; CHECK-V-NEXT:    and a2, a2, s0
-; CHECK-V-NEXT:    addi a3, s1, -1
-; CHECK-V-NEXT:    seqz a3, a3
+; CHECK-V-NEXT:    addi s1, s1, -1
+; CHECK-V-NEXT:    seqz a3, s1
 ; CHECK-V-NEXT:    addi a3, a3, -1
 ; CHECK-V-NEXT:    and a2, a3, a2
 ; CHECK-V-NEXT:    snez a3, a1
@@ -6181,14 +6181,14 @@ define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    slti a3, a1, 1
 ; CHECK-NOV-NEXT:    neg a3, a3
 ; CHECK-NOV-NEXT:    and a3, a3, a0
-; CHECK-NOV-NEXT:    addi a0, a1, -1
-; CHECK-NOV-NEXT:    seqz a0, a0
-; CHECK-NOV-NEXT:    addi a1, a0, -1
+; CHECK-NOV-NEXT:    addi a1, a1, -1
+; CHECK-NOV-NEXT:    seqz a1, a1
+; CHECK-NOV-NEXT:    addi a1, a1, -1
 ; CHECK-NOV-NEXT:    slti a0, s1, 1
 ; CHECK-NOV-NEXT:    neg a0, a0
 ; CHECK-NOV-NEXT:    and a0, a0, s0
-; CHECK-NOV-NEXT:    addi a5, s1, -1
-; CHECK-NOV-NEXT:    seqz a5, a5
+; CHECK-NOV-NEXT:    addi s1, s1, -1
+; CHECK-NOV-NEXT:    seqz a5, s1
 ; CHECK-NOV-NEXT:    addi a5, a5, -1
 ; CHECK-NOV-NEXT:    and a0, a5, a0
 ; CHECK-NOV-NEXT:    beqz a4, .LBB50_6
@@ -6557,8 +6557,8 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    snez a1, s2
 ; CHECK-NOV-NEXT:    addi a1, a1, -1
 ; CHECK-NOV-NEXT:    and a1, a1, s1
-; CHECK-NOV-NEXT:    addi a2, s2, -1
-; CHECK-NOV-NEXT:    seqz a2, a2
+; CHECK-NOV-NEXT:    addi s2, s2, -1
+; CHECK-NOV-NEXT:    seqz a2, s2
 ; CHECK-NOV-NEXT:    addi a2, a2, -1
 ; CHECK-NOV-NEXT:    and a1, a2, a1
 ; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -6599,8 +6599,8 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
 ; CHECK-V-NEXT:    snez a1, s2
 ; CHECK-V-NEXT:    addi a1, a1, -1
 ; CHECK-V-NEXT:    and a1, a1, s1
-; CHECK-V-NEXT:    addi a2, s2, -1
-; CHECK-V-NEXT:    seqz a2, a2
+; CHECK-V-NEXT:    addi s2, s2, -1
+; CHECK-V-NEXT:    seqz a2, s2
 ; CHECK-V-NEXT:    addi a2, a2, -1
 ; CHECK-V-NEXT:    and a1, a2, a1
 ; CHECK-V-NEXT:    sd a1, 8(sp)
@@ -6659,14 +6659,14 @@ define <2 x i64> @ustest_f16i64_mm(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    slti a3, a1, 1
 ; CHECK-NOV-NEXT:    neg a3, a3
 ; CHECK-NOV-NEXT:    and a3, a3, a0
-; CHECK-NOV-NEXT:    addi a0, a1, -1
-; CHECK-NOV-NEXT:    seqz a0, a0
-; CHECK-NOV-NEXT:    addi a1, a0, -1
+; CHECK-NOV-NEXT:    addi a1, a1, -1
+; CHECK-NOV-NEXT:    seqz a1, a1
+; CHECK-NOV-NEXT:    addi a1, a1, -1
 ; CHECK-NOV-NEXT:    slti a0, s1, 1
 ; CHECK-NOV-NEXT:    neg a0, a0
 ; CHECK-NOV-NEXT:    and a0, a0, s0
-; CHECK-NOV-NEXT:    addi a5, s1, -1
-; CHECK-NOV-NEXT:    seqz a5, a5
+; CHECK-NOV-NEXT:    addi s1, s1, -1
+; CHECK-NOV-NEXT:    seqz a5, s1
 ; CHECK-NOV-NEXT:    addi a5, a5, -1
 ; CHECK-NOV-NEXT:    and a0, a5, a0
 ; CHECK-NOV-NEXT:    beqz a4, .LBB53_6
@@ -6722,14 +6722,14 @@ define <2 x i64> @ustest_f16i64_mm(<2 x half> %x) {
 ; CHECK-V-NEXT:    slti a3, a1, 1
 ; CHECK-V-NEXT:    neg a3, a3
 ; CHECK-V-NEXT:    and a3, a3, a0
-; CHECK-V-NEXT:    addi a0, a1, -1
-; CHECK-V-NEXT:    seqz a0, a0
-; CHECK-V-NEXT:    addi a1, a0, -1
+; CHECK-V-NEXT:    addi a1, a1, -1
+; CHECK-V-NEXT:    seqz a1, a1
+; CHECK-V-NEXT:    addi a1, a1, -1
 ; CHECK-V-NEXT:    slti a0, s1, 1
 ; CHECK-V-NEXT:    neg a0, a0
 ; CHECK-V-NEXT:    and a0, a0, s0
-; CHECK-V-NEXT:    addi a5, s1, -1
-; CHECK-V-NEXT:    seqz a5, a5
+; CHECK-V-NEXT:    addi s1, s1, -1
+; CHECK-V-NEXT:    seqz a5, s1
 ; CHECK-V-NEXT:    addi a5, a5, -1
 ; CHECK-V-NEXT:    and a0, a5, a0
 ; CHECK-V-NEXT:    beqz a4, .LBB53_6

diff  --git a/llvm/test/CodeGen/RISCV/half-arith.ll b/llvm/test/CodeGen/RISCV/half-arith.ll
index 39cea9c2c65e9..53612aae60f46 100644
--- a/llvm/test/CodeGen/RISCV/half-arith.ll
+++ b/llvm/test/CodeGen/RISCV/half-arith.ll
@@ -438,8 +438,8 @@ define half @fsgnjn_s(half %a, half %b) nounwind {
 ; RV32I-NEXT:    call __truncsfhf2 at plt
 ; RV32I-NEXT:    lui a1, 1048568
 ; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    slli a1, s1, 17
-; RV32I-NEXT:    srli a1, a1, 17
+; RV32I-NEXT:    slli s1, s1, 17
+; RV32I-NEXT:    srli a1, s1, 17
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -477,8 +477,8 @@ define half @fsgnjn_s(half %a, half %b) nounwind {
 ; RV64I-NEXT:    call __truncsfhf2 at plt
 ; RV64I-NEXT:    lui a1, 1048568
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    slli a1, s1, 49
-; RV64I-NEXT:    srli a1, a1, 49
+; RV64I-NEXT:    slli s1, s1, 49
+; RV64I-NEXT:    srli a1, s1, 49
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -908,8 +908,8 @@ define half @fnmadd_s(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s0, a1
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi s3, a1, -1
+; RV32I-NEXT:    lui s3, 16
+; RV32I-NEXT:    addi s3, s3, -1
 ; RV32I-NEXT:    and a0, a0, s3
 ; RV32I-NEXT:    call __extendhfsf2 at plt
 ; RV32I-NEXT:    li a1, 0
@@ -966,8 +966,8 @@ define half @fnmadd_s(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    sd s4, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s1, a2
 ; RV64I-NEXT:    mv s0, a1
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw s3, a1, -1
+; RV64I-NEXT:    lui s3, 16
+; RV64I-NEXT:    addiw s3, s3, -1
 ; RV64I-NEXT:    and a0, a0, s3
 ; RV64I-NEXT:    call __extendhfsf2 at plt
 ; RV64I-NEXT:    li a1, 0
@@ -1040,8 +1040,8 @@ define half @fnmadd_s_2(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    lui a0, 16
-; RV32I-NEXT:    addi s3, a0, -1
+; RV32I-NEXT:    lui s3, 16
+; RV32I-NEXT:    addi s3, s3, -1
 ; RV32I-NEXT:    and a0, a1, s3
 ; RV32I-NEXT:    call __extendhfsf2 at plt
 ; RV32I-NEXT:    li a1, 0
@@ -1098,8 +1098,8 @@ define half @fnmadd_s_2(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    sd s4, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s1, a2
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    lui a0, 16
-; RV64I-NEXT:    addiw s3, a0, -1
+; RV64I-NEXT:    lui s3, 16
+; RV64I-NEXT:    addiw s3, s3, -1
 ; RV64I-NEXT:    and a0, a1, s3
 ; RV64I-NEXT:    call __extendhfsf2 at plt
 ; RV64I-NEXT:    li a1, 0
@@ -1743,8 +1743,8 @@ define half @fnmadd_s_contract(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a2
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi s3, a1, -1
+; RV32I-NEXT:    lui s3, 16
+; RV32I-NEXT:    addi s3, s3, -1
 ; RV32I-NEXT:    and a0, a0, s3
 ; RV32I-NEXT:    call __extendhfsf2 at plt
 ; RV32I-NEXT:    li a1, 0
@@ -1804,8 +1804,8 @@ define half @fnmadd_s_contract(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw s3, a1, -1
+; RV64I-NEXT:    lui s3, 16
+; RV64I-NEXT:    addiw s3, s3, -1
 ; RV64I-NEXT:    and a0, a0, s3
 ; RV64I-NEXT:    call __extendhfsf2 at plt
 ; RV64I-NEXT:    li a1, 0
@@ -1882,8 +1882,8 @@ define half @fnmsub_s_contract(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a2
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi s3, a1, -1
+; RV32I-NEXT:    lui s3, 16
+; RV32I-NEXT:    addi s3, s3, -1
 ; RV32I-NEXT:    and a0, a0, s3
 ; RV32I-NEXT:    call __extendhfsf2 at plt
 ; RV32I-NEXT:    li a1, 0
@@ -1933,8 +1933,8 @@ define half @fnmsub_s_contract(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw s3, a1, -1
+; RV64I-NEXT:    lui s3, 16
+; RV64I-NEXT:    addiw s3, s3, -1
 ; RV64I-NEXT:    and a0, a0, s3
 ; RV64I-NEXT:    call __extendhfsf2 at plt
 ; RV64I-NEXT:    li a1, 0

diff  --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index 97f0520a77117..9ef663c62936e 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -152,8 +152,8 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    call __gtsf2 at plt
 ; RV32I-NEXT:    blez a0, .LBB1_4
 ; RV32I-NEXT:  # %bb.3: # %start
-; RV32I-NEXT:    lui a0, 8
-; RV32I-NEXT:    addi s1, a0, -1
+; RV32I-NEXT:    lui s1, 8
+; RV32I-NEXT:    addi s1, s1, -1
 ; RV32I-NEXT:  .LBB1_4: # %start
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s0
@@ -195,8 +195,8 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    call __gtsf2 at plt
 ; RV64I-NEXT:    blez a0, .LBB1_4
 ; RV64I-NEXT:  # %bb.3: # %start
-; RV64I-NEXT:    lui a0, 8
-; RV64I-NEXT:    addiw s1, a0, -1
+; RV64I-NEXT:    lui s1, 8
+; RV64I-NEXT:    addiw s1, s1, -1
 ; RV64I-NEXT:  .LBB1_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
@@ -317,8 +317,8 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi s0, a1, -1
+; RV32I-NEXT:    lui s0, 16
+; RV32I-NEXT:    addi s0, s0, -1
 ; RV32I-NEXT:    and a0, a0, s0
 ; RV32I-NEXT:    call __extendhfsf2 at plt
 ; RV32I-NEXT:    mv s3, a0
@@ -355,8 +355,8 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw s0, a1, -1
+; RV64I-NEXT:    lui s0, 16
+; RV64I-NEXT:    addiw s0, s0, -1
 ; RV64I-NEXT:    and a0, a0, s0
 ; RV64I-NEXT:    call __extendhfsf2 at plt
 ; RV64I-NEXT:    mv s3, a0
@@ -484,8 +484,8 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    lui s1, 524288
 ; RV32I-NEXT:  .LBB5_2: # %start
-; RV32I-NEXT:    lui a0, 323584
-; RV32I-NEXT:    addi a1, a0, -1
+; RV32I-NEXT:    lui a1, 323584
+; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __gtsf2 at plt
 ; RV32I-NEXT:    blez a0, .LBB5_4
@@ -529,8 +529,8 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    lui s1, 524288
 ; RV64I-NEXT:  .LBB5_2: # %start
-; RV64I-NEXT:    lui a0, 323584
-; RV64I-NEXT:    addiw a1, a0, -1
+; RV64I-NEXT:    lui a1, 323584
+; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __gtsf2 at plt
 ; RV64I-NEXT:    blez a0, .LBB5_4
@@ -718,9 +718,8 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __extendhfsf2 at plt
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    lui a0, 325632
-; RV32I-NEXT:    addi a1, a0, -1
-; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    lui a1, 325632
+; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    call __gtsf2 at plt
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    neg s1, a0
@@ -757,8 +756,8 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __fixunssfdi at plt
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    lui a0, 325632
-; RV64I-NEXT:    addiw a1, a0, -1
+; RV64I-NEXT:    lui a1, 325632
+; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __gtsf2 at plt
 ; RV64I-NEXT:    blez a0, .LBB8_2
@@ -961,8 +960,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    lui s2, 524288
 ; RV32I-NEXT:  .LBB10_2: # %start
-; RV32I-NEXT:    lui a0, 389120
-; RV32I-NEXT:    addi s3, a0, -1
+; RV32I-NEXT:    lui s3, 389120
+; RV32I-NEXT:    addi s3, s3, -1
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s3
 ; RV32I-NEXT:    call __gtsf2 at plt
@@ -1027,8 +1026,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    slli s1, s3, 63
 ; RV64I-NEXT:  .LBB10_2: # %start
-; RV64I-NEXT:    lui a0, 389120
-; RV64I-NEXT:    addiw a1, a0, -1
+; RV64I-NEXT:    lui a1, 389120
+; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __gtsf2 at plt
 ; RV64I-NEXT:    blez a0, .LBB10_4
@@ -1192,9 +1191,8 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __extendhfsf2 at plt
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    lui a0, 391168
-; RV32I-NEXT:    addi s1, a0, -1
-; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    lui s1, 391168
+; RV32I-NEXT:    addi s1, s1, -1
 ; RV32I-NEXT:    mv a1, s1
 ; RV32I-NEXT:    call __gtsf2 at plt
 ; RV32I-NEXT:    sgtz a0, a0
@@ -1242,9 +1240,8 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __extendhfsf2 at plt
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    lui a0, 391168
-; RV64I-NEXT:    addiw a1, a0, -1
-; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    lui a1, 391168
+; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    call __gtsf2 at plt
 ; RV64I-NEXT:    sgtz a0, a0
 ; RV64I-NEXT:    neg s1, a0
@@ -2267,8 +2264,8 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ; RV32I-NEXT:    call __gtsf2 at plt
 ; RV32I-NEXT:    blez a0, .LBB32_4
 ; RV32I-NEXT:  # %bb.3: # %start
-; RV32I-NEXT:    lui a0, 8
-; RV32I-NEXT:    addi s1, a0, -1
+; RV32I-NEXT:    lui s1, 8
+; RV32I-NEXT:    addi s1, s1, -1
 ; RV32I-NEXT:  .LBB32_4: # %start
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s0
@@ -2312,8 +2309,8 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ; RV64I-NEXT:    call __gtsf2 at plt
 ; RV64I-NEXT:    blez a0, .LBB32_4
 ; RV64I-NEXT:  # %bb.3: # %start
-; RV64I-NEXT:    lui a0, 8
-; RV64I-NEXT:    addiw s1, a0, -1
+; RV64I-NEXT:    lui s1, 8
+; RV64I-NEXT:    addiw s1, s1, -1
 ; RV64I-NEXT:  .LBB32_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
@@ -2435,8 +2432,8 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
 ; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi s3, a1, -1
+; RV32I-NEXT:    lui s3, 16
+; RV32I-NEXT:    addi s3, s3, -1
 ; RV32I-NEXT:    and a0, a0, s3
 ; RV32I-NEXT:    call __extendhfsf2 at plt
 ; RV32I-NEXT:    mv s2, a0
@@ -2476,8 +2473,8 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
 ; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw s3, a1, -1
+; RV64I-NEXT:    lui s3, 16
+; RV64I-NEXT:    addiw s3, s3, -1
 ; RV64I-NEXT:    and a0, a0, s3
 ; RV64I-NEXT:    call __extendhfsf2 at plt
 ; RV64I-NEXT:    mv s2, a0
@@ -2941,9 +2938,8 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
 ; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __extendhfsf2 at plt
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    lui a0, 325632
-; RV32I-NEXT:    addi a1, a0, -1
-; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    lui a1, 325632
+; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    call __gtsf2 at plt
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    neg s1, a0
@@ -2980,8 +2976,8 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __fixunssfdi at plt
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    lui a0, 325632
-; RV64I-NEXT:    addiw a1, a0, -1
+; RV64I-NEXT:    lui a1, 325632
+; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __gtsf2 at plt
 ; RV64I-NEXT:    blez a0, .LBB39_2
@@ -3058,8 +3054,8 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    lui s1, 524288
 ; RV32I-NEXT:  .LBB40_2: # %start
-; RV32I-NEXT:    lui a0, 323584
-; RV32I-NEXT:    addi a1, a0, -1
+; RV32I-NEXT:    lui a1, 323584
+; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __gtsf2 at plt
 ; RV32I-NEXT:    blez a0, .LBB40_4
@@ -3103,8 +3099,8 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    lui s1, 524288
 ; RV64I-NEXT:  .LBB40_2: # %start
-; RV64I-NEXT:    lui a0, 323584
-; RV64I-NEXT:    addiw a1, a0, -1
+; RV64I-NEXT:    lui a1, 323584
+; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __gtsf2 at plt
 ; RV64I-NEXT:    blez a0, .LBB40_4

diff  --git a/llvm/test/CodeGen/RISCV/imm.ll b/llvm/test/CodeGen/RISCV/imm.ll
index d548948905283..4e80038dabcad 100644
--- a/llvm/test/CodeGen/RISCV/imm.ll
+++ b/llvm/test/CodeGen/RISCV/imm.ll
@@ -733,8 +733,8 @@ define i64 @imm_left_shifted_lui_2() nounwind {
 define i64 @imm_left_shifted_lui_3() nounwind {
 ; RV32I-LABEL: imm_left_shifted_lui_3:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a0, 1
-; RV32I-NEXT:    addi a1, a0, 1
+; RV32I-NEXT:    lui a1, 1
+; RV32I-NEXT:    addi a1, a1, 1
 ; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 ;
@@ -2316,8 +2316,8 @@ define i64 @li_rori_1() {
 define i64 @li_rori_2() {
 ; RV32I-LABEL: li_rori_2:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a0, 720896
-; RV32I-NEXT:    addi a1, a0, -1
+; RV32I-NEXT:    lui a1, 720896
+; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    li a0, -6
 ; RV32I-NEXT:    ret
 ;

diff  --git a/llvm/test/CodeGen/RISCV/narrow-shl-cst.ll b/llvm/test/CodeGen/RISCV/narrow-shl-cst.ll
index 7434f620bef0e..64d2caabede04 100644
--- a/llvm/test/CodeGen/RISCV/narrow-shl-cst.ll
+++ b/llvm/test/CodeGen/RISCV/narrow-shl-cst.ll
@@ -39,8 +39,8 @@ define signext i32 @test2(i32 signext %x) nounwind {
 define i64 @test3(i64 %x) nounwind {
 ; RV32-LABEL: test3:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    andi a0, a0, 241
-; RV32-NEXT:    slli a1, a0, 8
+; RV32-NEXT:    andi a1, a0, 241
+; RV32-NEXT:    slli a1, a1, 8
 ; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ;
@@ -57,8 +57,8 @@ define i64 @test3(i64 %x) nounwind {
 define i64 @test4(i64 %x) nounwind {
 ; RV32-LABEL: test4:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ori a0, a0, 241
-; RV32-NEXT:    slli a1, a0, 8
+; RV32-NEXT:    ori a1, a0, 241
+; RV32-NEXT:    slli a1, a1, 8
 ; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ;
@@ -75,8 +75,8 @@ define i64 @test4(i64 %x) nounwind {
 define i64 @test5(i64 %x) nounwind {
 ; RV32-LABEL: test5:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    ori a0, a0, 31
-; RV32-NEXT:    slli a1, a0, 8
+; RV32-NEXT:    ori a1, a0, 31
+; RV32-NEXT:    slli a1, a1, 8
 ; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ;
@@ -93,8 +93,8 @@ define i64 @test5(i64 %x) nounwind {
 define i64 @test6(i64 %x) nounwind {
 ; RV32-LABEL: test6:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    xori a0, a0, 241
-; RV32-NEXT:    slli a1, a0, 8
+; RV32-NEXT:    xori a1, a0, 241
+; RV32-NEXT:    slli a1, a1, 8
 ; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ;
@@ -111,8 +111,8 @@ define i64 @test6(i64 %x) nounwind {
 define i64 @test7(i64 %x) nounwind {
 ; RV32-LABEL: test7:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    xori a0, a0, 31
-; RV32-NEXT:    slli a1, a0, 8
+; RV32-NEXT:    xori a1, a0, 31
+; RV32-NEXT:    slli a1, a1, 8
 ; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ;

diff  --git a/llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll b/llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll
index 2fd283d600e0e..61477589f8455 100644
--- a/llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll
+++ b/llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll
@@ -64,16 +64,16 @@ define void @test2(ptr nocapture noundef %a, i32 noundef signext %n) {
 ; CHECK-NEXT:    lw a6, 0(a4)
 ; CHECK-NEXT:    addiw a5, a5, 4
 ; CHECK-NEXT:    sw a5, -4(a4)
-; CHECK-NEXT:    addiw a5, a6, 4
-; CHECK-NEXT:    sw a5, 0(a4)
+; CHECK-NEXT:    addiw a6, a6, 4
+; CHECK-NEXT:    sw a6, 0(a4)
 ; CHECK-NEXT:    addi a3, a3, 2
 ; CHECK-NEXT:    addi a4, a4, 8
 ; CHECK-NEXT:    bne a1, a3, .LBB1_4
 ; CHECK-NEXT:  .LBB1_5: # %for.cond.cleanup.loopexit.unr-lcssa
 ; CHECK-NEXT:    beqz a2, .LBB1_7
 ; CHECK-NEXT:  # %bb.6: # %for.body.epil
-; CHECK-NEXT:    slli a1, a3, 2
-; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    slli a3, a3, 2
+; CHECK-NEXT:    add a0, a0, a3
 ; CHECK-NEXT:    lw a1, 0(a0)
 ; CHECK-NEXT:    addiw a1, a1, 4
 ; CHECK-NEXT:    sw a1, 0(a0)

diff  --git a/llvm/test/CodeGen/RISCV/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
index 982885303fa53..3992992bc9ba0 100644
--- a/llvm/test/CodeGen/RISCV/rotl-rotr.ll
+++ b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
@@ -1211,8 +1211,8 @@ define signext i64 @rotr_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign
 ; RV32I-NEXT:    sll a1, a1, a7
 ; RV32I-NEXT:    or a1, a1, t0
 ; RV32I-NEXT:    srl t0, a0, a4
-; RV32I-NEXT:    slli a0, a6, 1
-; RV32I-NEXT:    sll a6, a0, a7
+; RV32I-NEXT:    slli a6, a6, 1
+; RV32I-NEXT:    sll a6, a6, a7
 ; RV32I-NEXT:    addi a0, a5, -32
 ; RV32I-NEXT:    or a6, a6, t0
 ; RV32I-NEXT:    bltz a0, .LBB19_6
@@ -1265,8 +1265,8 @@ define signext i64 @rotr_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign
 ; RV32ZBB-NEXT:    sll a1, a1, a7
 ; RV32ZBB-NEXT:    or a1, a1, t0
 ; RV32ZBB-NEXT:    srl t0, a0, a4
-; RV32ZBB-NEXT:    slli a0, a6, 1
-; RV32ZBB-NEXT:    sll a6, a0, a7
+; RV32ZBB-NEXT:    slli a6, a6, 1
+; RV32ZBB-NEXT:    sll a6, a6, a7
 ; RV32ZBB-NEXT:    addi a0, a5, -32
 ; RV32ZBB-NEXT:    or a6, a6, t0
 ; RV32ZBB-NEXT:    bltz a0, .LBB19_6
@@ -1528,8 +1528,8 @@ define i64 @rotr_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
 ; RV32I-NEXT:    not a0, a4
 ; RV32I-NEXT:    sll t0, t0, a0
 ; RV32I-NEXT:    srl t1, a1, a4
-; RV32I-NEXT:    slli a1, a6, 1
-; RV32I-NEXT:    sll t2, a1, a0
+; RV32I-NEXT:    slli a6, a6, 1
+; RV32I-NEXT:    sll t2, a6, a0
 ; RV32I-NEXT:    mv a6, a2
 ; RV32I-NEXT:    beqz a5, .LBB23_6
 ; RV32I-NEXT:  # %bb.5:
@@ -1546,8 +1546,8 @@ define i64 @rotr_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
 ; RV32I-NEXT:    sll a2, a2, a0
 ; RV32I-NEXT:    or a2, a2, t0
 ; RV32I-NEXT:    srl a3, a3, a4
-; RV32I-NEXT:    slli a4, a6, 1
-; RV32I-NEXT:    sll a0, a4, a0
+; RV32I-NEXT:    slli a6, a6, 1
+; RV32I-NEXT:    sll a0, a6, a0
 ; RV32I-NEXT:    or a0, a0, a3
 ; RV32I-NEXT:    add a3, a7, a0
 ; RV32I-NEXT:    add a0, a1, a2
@@ -1584,8 +1584,8 @@ define i64 @rotr_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
 ; RV32ZBB-NEXT:    not a0, a4
 ; RV32ZBB-NEXT:    sll t0, t0, a0
 ; RV32ZBB-NEXT:    srl t1, a1, a4
-; RV32ZBB-NEXT:    slli a1, a6, 1
-; RV32ZBB-NEXT:    sll t2, a1, a0
+; RV32ZBB-NEXT:    slli a6, a6, 1
+; RV32ZBB-NEXT:    sll t2, a6, a0
 ; RV32ZBB-NEXT:    mv a6, a2
 ; RV32ZBB-NEXT:    beqz a5, .LBB23_6
 ; RV32ZBB-NEXT:  # %bb.5:
@@ -1602,8 +1602,8 @@ define i64 @rotr_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
 ; RV32ZBB-NEXT:    sll a2, a2, a0
 ; RV32ZBB-NEXT:    or a2, a2, t0
 ; RV32ZBB-NEXT:    srl a3, a3, a4
-; RV32ZBB-NEXT:    slli a4, a6, 1
-; RV32ZBB-NEXT:    sll a0, a4, a0
+; RV32ZBB-NEXT:    slli a6, a6, 1
+; RV32ZBB-NEXT:    sll a0, a6, a0
 ; RV32ZBB-NEXT:    or a0, a0, a3
 ; RV32ZBB-NEXT:    add a3, a7, a0
 ; RV32ZBB-NEXT:    add a0, a1, a2

diff  --git a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
index 3cd5e331f34e4..16324c6a2c111 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
@@ -207,8 +207,8 @@ define i64 @ror_i64(i64 %a, i64 %b) nounwind {
 ; CHECK-NEXT:    sll a0, a0, a5
 ; CHECK-NEXT:    or a0, a0, a4
 ; CHECK-NEXT:    srl a1, a1, a2
-; CHECK-NEXT:    slli a2, a3, 1
-; CHECK-NEXT:    sll a2, a2, a5
+; CHECK-NEXT:    slli a3, a3, 1
+; CHECK-NEXT:    sll a2, a3, a5
 ; CHECK-NEXT:    or a1, a2, a1
 ; CHECK-NEXT:    ret
   %or = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %b)
@@ -305,8 +305,8 @@ define i64 @not_shl_one_i64(i64 %x) {
 ; CHECK-NEXT:    neg a4, a3
 ; CHECK-NEXT:    and a2, a4, a2
 ; CHECK-NEXT:    sll a0, a1, a0
-; CHECK-NEXT:    addi a1, a3, -1
-; CHECK-NEXT:    and a1, a1, a0
+; CHECK-NEXT:    addi a3, a3, -1
+; CHECK-NEXT:    and a1, a3, a0
 ; CHECK-NEXT:    not a0, a2
 ; CHECK-NEXT:    not a1, a1
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/rv32zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbkb.ll
index 2f30d8783ff91..a4dbbde0aa2fb 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbkb.ll
@@ -149,8 +149,8 @@ define i64 @packh_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: packh_i64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 255
-; RV32I-NEXT:    slli a1, a2, 24
-; RV32I-NEXT:    srli a1, a1, 16
+; RV32I-NEXT:    slli a2, a2, 24
+; RV32I-NEXT:    srli a1, a2, 16
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/rv32zbs.ll b/llvm/test/CodeGen/RISCV/rv32zbs.ll
index bc2ea049bb863..a57f4964f430f 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbs.ll
@@ -54,8 +54,8 @@ define i64 @bclr_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    neg a6, a5
 ; RV32I-NEXT:    and a4, a6, a4
 ; RV32I-NEXT:    sll a2, a3, a2
-; RV32I-NEXT:    addi a3, a5, -1
-; RV32I-NEXT:    and a2, a3, a2
+; RV32I-NEXT:    addi a5, a5, -1
+; RV32I-NEXT:    and a2, a5, a2
 ; RV32I-NEXT:    not a3, a4
 ; RV32I-NEXT:    not a2, a2
 ; RV32I-NEXT:    and a0, a3, a0
@@ -176,8 +176,8 @@ define signext i64 @bset_i64_zero(i64 signext %a) nounwind {
 ; RV32I-NEXT:    neg a0, a4
 ; RV32I-NEXT:    and a0, a0, a2
 ; RV32I-NEXT:    sll a1, a1, a3
-; RV32I-NEXT:    addi a2, a4, -1
-; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    addi a4, a4, -1
+; RV32I-NEXT:    and a1, a4, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBS-LABEL: bset_i64_zero:

diff  --git a/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll b/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll
index a99bd2843f735..690828c779434 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll
@@ -21,8 +21,8 @@ define i64 @complex_float_add(i64 %a.coerce, i64 %b.coerce) nounwind {
 ; CHECK-NEXT:    mv a1, s1
 ; CHECK-NEXT:    call __addsf3 at plt
 ; CHECK-NEXT:    slli a0, a0, 32
-; CHECK-NEXT:    slli a1, s2, 32
-; CHECK-NEXT:    srli a1, a1, 32
+; CHECK-NEXT:    slli s2, s2, 32
+; CHECK-NEXT:    srli a1, s2, 32
 ; CHECK-NEXT:    or a0, a0, a1
 ; CHECK-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload

diff  --git a/llvm/test/CodeGen/RISCV/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
index 7ea059cb5aa85..5dca3f7a9f0f5 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
@@ -252,8 +252,8 @@ define i64 @pack_i64_allWUsers(i32 signext %0, i32 signext %1, i32 signext %2) {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addw a0, a1, a0
 ; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    slli a1, a2, 32
-; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    slli a2, a2, 32
+; RV64I-NEXT:    srli a1, a2, 32
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;

diff  --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
index 39a0d5a323a2f..feff2f292175b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
@@ -648,8 +648,8 @@ define double @extractelt_nxv16f64_neg1(<vscale x 16 x double> %v) {
 ; RV64-NEXT:  # %bb.1:
 ; RV64-NEXT:    mv a2, a1
 ; RV64-NEXT:  .LBB52_2:
-; RV64-NEXT:    slli a1, a2, 3
-; RV64-NEXT:    add a0, a0, a1
+; RV64-NEXT:    slli a2, a2, 3
+; RV64-NEXT:    add a0, a0, a2
 ; RV64-NEXT:    fld fa0, 0(a0)
 ; RV64-NEXT:    addi sp, s0, -80
 ; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload

diff  --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
index 6eeb6a695d39c..3eb3722d14dc0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
@@ -830,8 +830,8 @@ define i64 @extractelt_nxv16i64_neg1(<vscale x 16 x i64> %v) {
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a2, a1
 ; CHECK-NEXT:  .LBB72_2:
-; CHECK-NEXT:    slli a1, a2, 3
-; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    slli a2, a2, 3
+; CHECK-NEXT:    add a0, a0, a2
 ; CHECK-NEXT:    ld a0, 0(a0)
 ; CHECK-NEXT:    addi sp, s0, -80
 ; CHECK-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll
index 531a5d6163038..b10255121d260 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll
@@ -853,9 +853,9 @@ define void @strided_load_startval_add_with_splat(i8* noalias nocapture %0, i8*
 ; CHECK-NEXT:    mv a3, a2
 ; CHECK-NEXT:    bltu a4, a5, .LBB13_5
 ; CHECK-NEXT:  # %bb.2:
-; CHECK-NEXT:    slli a3, a4, 32
-; CHECK-NEXT:    srli a3, a3, 32
-; CHECK-NEXT:    addi a4, a3, 1
+; CHECK-NEXT:    slli a4, a4, 32
+; CHECK-NEXT:    srli a4, a4, 32
+; CHECK-NEXT:    addi a4, a4, 1
 ; CHECK-NEXT:    andi a5, a4, -32
 ; CHECK-NEXT:    add a3, a5, a2
 ; CHECK-NEXT:    slli a6, a2, 2

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
index c739ecd8158c3..db7535d401f37 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
@@ -104,8 +104,8 @@ define void @fpext_v8f16_v8f64(<8 x half>* %x, <8 x double>* %y) {
 ; LMULMAX1-NEXT:    vse64.v v8, (a1)
 ; LMULMAX1-NEXT:    addi a0, a1, 48
 ; LMULMAX1-NEXT:    vse64.v v11, (a0)
-; LMULMAX1-NEXT:    addi a0, a1, 16
-; LMULMAX1-NEXT:    vse64.v v9, (a0)
+; LMULMAX1-NEXT:    addi a1, a1, 16
+; LMULMAX1-NEXT:    vse64.v v9, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %d = fpext <8 x half> %a to <8 x double>

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
index dc48226300e50..e15326b2b3a31 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
@@ -96,8 +96,8 @@ define void @fp2si_v8f32_v8i32(<8 x float>* %x, <8 x i32>* %y) {
 ; LMULMAX1-NEXT:    vfcvt.rtz.x.f.v v8, v8
 ; LMULMAX1-NEXT:    vfcvt.rtz.x.f.v v9, v9
 ; LMULMAX1-NEXT:    vse32.v v9, (a1)
-; LMULMAX1-NEXT:    addi a0, a1, 16
-; LMULMAX1-NEXT:    vse32.v v8, (a0)
+; LMULMAX1-NEXT:    addi a1, a1, 16
+; LMULMAX1-NEXT:    vse32.v v8, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
   %d = fptosi <8 x float> %a to <8 x i32>
@@ -123,8 +123,8 @@ define void @fp2ui_v8f32_v8i32(<8 x float>* %x, <8 x i32>* %y) {
 ; LMULMAX1-NEXT:    vfcvt.rtz.xu.f.v v8, v8
 ; LMULMAX1-NEXT:    vfcvt.rtz.xu.f.v v9, v9
 ; LMULMAX1-NEXT:    vse32.v v9, (a1)
-; LMULMAX1-NEXT:    addi a0, a1, 16
-; LMULMAX1-NEXT:    vse32.v v8, (a0)
+; LMULMAX1-NEXT:    addi a1, a1, 16
+; LMULMAX1-NEXT:    vse32.v v8, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
   %d = fptoui <8 x float> %a to <8 x i32>

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
index 94963ec260118..cb71986b5b5e1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
@@ -102,8 +102,8 @@ define void @si2fp_v8i32_v8f32(<8 x i32>* %x, <8 x float>* %y) {
 ; LMULMAX1-NEXT:    vfcvt.f.x.v v8, v8
 ; LMULMAX1-NEXT:    vfcvt.f.x.v v9, v9
 ; LMULMAX1-NEXT:    vse32.v v9, (a1)
-; LMULMAX1-NEXT:    addi a0, a1, 16
-; LMULMAX1-NEXT:    vse32.v v8, (a0)
+; LMULMAX1-NEXT:    addi a1, a1, 16
+; LMULMAX1-NEXT:    vse32.v v8, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x i32>, <8 x i32>* %x
   %d = sitofp <8 x i32> %a to <8 x float>
@@ -129,8 +129,8 @@ define void @ui2fp_v8i32_v8f32(<8 x i32>* %x, <8 x float>* %y) {
 ; LMULMAX1-NEXT:    vfcvt.f.xu.v v8, v8
 ; LMULMAX1-NEXT:    vfcvt.f.xu.v v9, v9
 ; LMULMAX1-NEXT:    vse32.v v9, (a1)
-; LMULMAX1-NEXT:    addi a0, a1, 16
-; LMULMAX1-NEXT:    vse32.v v8, (a0)
+; LMULMAX1-NEXT:    addi a1, a1, 16
+; LMULMAX1-NEXT:    vse32.v v8, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x i32>, <8 x i32>* %x
   %d = uitofp <8 x i32> %a to <8 x float>
@@ -263,8 +263,8 @@ define void @si2fp_v8i16_v8f64(<8 x i16>* %x, <8 x double>* %y) {
 ; LMULMAX1-NEXT:    vse64.v v8, (a1)
 ; LMULMAX1-NEXT:    addi a0, a1, 48
 ; LMULMAX1-NEXT:    vse64.v v11, (a0)
-; LMULMAX1-NEXT:    addi a0, a1, 16
-; LMULMAX1-NEXT:    vse64.v v9, (a0)
+; LMULMAX1-NEXT:    addi a1, a1, 16
+; LMULMAX1-NEXT:    vse64.v v9, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
   %d = sitofp <8 x i16> %a to <8 x double>
@@ -307,8 +307,8 @@ define void @ui2fp_v8i16_v8f64(<8 x i16>* %x, <8 x double>* %y) {
 ; LMULMAX1-NEXT:    vse64.v v8, (a1)
 ; LMULMAX1-NEXT:    addi a0, a1, 48
 ; LMULMAX1-NEXT:    vse64.v v11, (a0)
-; LMULMAX1-NEXT:    addi a0, a1, 16
-; LMULMAX1-NEXT:    vse64.v v9, (a0)
+; LMULMAX1-NEXT:    addi a1, a1, 16
+; LMULMAX1-NEXT:    vse64.v v9, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
   %d = uitofp <8 x i16> %a to <8 x double>

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
index fe728c0ad7d65..28d29e820b91a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -108,8 +108,8 @@ define void @insertelt_v32i16(<32 x i16>* %x, i16 %y, i32 %idx) {
 ; RV64-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
 ; RV64-NEXT:    vle16.v v8, (a0)
 ; RV64-NEXT:    vmv.s.x v12, a1
-; RV64-NEXT:    slli a1, a2, 32
-; RV64-NEXT:    srli a1, a1, 32
+; RV64-NEXT:    slli a2, a2, 32
+; RV64-NEXT:    srli a1, a2, 32
 ; RV64-NEXT:    addi a2, a1, 1
 ; RV64-NEXT:    vsetvli zero, a2, e16, m4, tu, ma
 ; RV64-NEXT:    vslideup.vx v8, v12, a1

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
index e2b604609375b..1fba90a33cbc7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
@@ -773,8 +773,8 @@ define void @vadd_vx_v16i64(<16 x i64>* %a, i64 %b, <16 x i64>* %c) {
 ; LMULMAX1-RV32-NEXT:    addi a0, a3, 48
 ; LMULMAX1-RV32-NEXT:    vse64.v v13, (a0)
 ; LMULMAX1-RV32-NEXT:    vse64.v v14, (a3)
-; LMULMAX1-RV32-NEXT:    addi a0, a3, 16
-; LMULMAX1-RV32-NEXT:    vse64.v v15, (a0)
+; LMULMAX1-RV32-NEXT:    addi a3, a3, 16
+; LMULMAX1-RV32-NEXT:    vse64.v v15, (a3)
 ; LMULMAX1-RV32-NEXT:    ret
 ;
 ; LMULMAX8-RV64-LABEL: vadd_vx_v16i64:
@@ -847,8 +847,8 @@ define void @vadd_vx_v16i64(<16 x i64>* %a, i64 %b, <16 x i64>* %c) {
 ; LMULMAX1-RV64-NEXT:    vse64.v v14, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a0, a2, 48
 ; LMULMAX1-RV64-NEXT:    vse64.v v12, (a0)
-; LMULMAX1-RV64-NEXT:    addi a0, a2, 16
-; LMULMAX1-RV64-NEXT:    vse64.v v13, (a0)
+; LMULMAX1-RV64-NEXT:    addi a2, a2, 16
+; LMULMAX1-RV64-NEXT:    vse64.v v13, (a2)
 ; LMULMAX1-RV64-NEXT:    ret
   %va = load <16 x i64>, <16 x i64>* %a
   %head = insertelement <16 x i64> poison, i64 %b, i32 0

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
index fc845a7adb6ef..0e5b24d242339 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
@@ -208,9 +208,9 @@ define void @splat_concat_low(<4 x i16>* %x, <4 x i16>* %y, <8 x i16>* %z) {
 define void @splat_concat_high(<4 x i16>* %x, <4 x i16>* %y, <8 x i16>* %z) {
 ; CHECK-LABEL: splat_concat_high:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a1, 2
+; CHECK-NEXT:    addi a1, a1, 2
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vlse16.v v8, (a0), zero
+; CHECK-NEXT:    vlse16.v v8, (a1), zero
 ; CHECK-NEXT:    vse16.v v8, (a2)
 ; CHECK-NEXT:    ret
   %a = load <4 x i16>, <4 x i16>* %x

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 03cad39c5a571..84a1d41c7ed4b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -5877,12 +5877,12 @@ define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(i64* %base, <8 x i16> %idxs,
 ;
 ; RV64ZVE32F-LABEL: mgather_baseidx_zext_v8i16_v8i64:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    lui a3, 16
+; RV64ZVE32F-NEXT:    lui a5, 16
 ; RV64ZVE32F-NEXT:    vsetivli zero, 0, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a6, v0
-; RV64ZVE32F-NEXT:    andi a4, a6, 1
-; RV64ZVE32F-NEXT:    addiw a5, a3, -1
-; RV64ZVE32F-NEXT:    beqz a4, .LBB53_3
+; RV64ZVE32F-NEXT:    andi a3, a6, 1
+; RV64ZVE32F-NEXT:    addiw a5, a5, -1
+; RV64ZVE32F-NEXT:    beqz a3, .LBB53_3
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a3, v8

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index a30fe9440f815..57df6eb50f39d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -6005,8 +6005,8 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, i64* %base, <8 x i64> %idxs,
 ; RV64ZVE32F-NEXT:    andi a0, a7, -128
 ; RV64ZVE32F-NEXT:    beqz a0, .LBB51_9
 ; RV64ZVE32F-NEXT:  .LBB51_8: # %cond.store13
-; RV64ZVE32F-NEXT:    slli a0, a5, 3
-; RV64ZVE32F-NEXT:    add a0, a1, a0
+; RV64ZVE32F-NEXT:    slli a5, a5, 3
+; RV64ZVE32F-NEXT:    add a0, a1, a5
 ; RV64ZVE32F-NEXT:    sd a3, 0(a0)
 ; RV64ZVE32F-NEXT:  .LBB51_9: # %else14
 ; RV64ZVE32F-NEXT:    ld s0, 24(sp) # 8-byte Folded Reload
@@ -6024,38 +6024,38 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, i64* %base, <8 x i64> %idxs,
 ; RV64ZVE32F-NEXT:    andi a0, a7, 2
 ; RV64ZVE32F-NEXT:    beqz a0, .LBB51_2
 ; RV64ZVE32F-NEXT:  .LBB51_11: # %cond.store1
-; RV64ZVE32F-NEXT:    slli a0, s2, 3
-; RV64ZVE32F-NEXT:    add a0, a1, a0
+; RV64ZVE32F-NEXT:    slli s2, s2, 3
+; RV64ZVE32F-NEXT:    add a0, a1, s2
 ; RV64ZVE32F-NEXT:    sd s1, 0(a0)
 ; RV64ZVE32F-NEXT:    andi a0, a7, 4
 ; RV64ZVE32F-NEXT:    beqz a0, .LBB51_3
 ; RV64ZVE32F-NEXT:  .LBB51_12: # %cond.store3
-; RV64ZVE32F-NEXT:    slli a0, s0, 3
-; RV64ZVE32F-NEXT:    add a0, a1, a0
+; RV64ZVE32F-NEXT:    slli s0, s0, 3
+; RV64ZVE32F-NEXT:    add a0, a1, s0
 ; RV64ZVE32F-NEXT:    sd t6, 0(a0)
 ; RV64ZVE32F-NEXT:    andi a0, a7, 8
 ; RV64ZVE32F-NEXT:    beqz a0, .LBB51_4
 ; RV64ZVE32F-NEXT:  .LBB51_13: # %cond.store5
-; RV64ZVE32F-NEXT:    slli a0, t5, 3
-; RV64ZVE32F-NEXT:    add a0, a1, a0
+; RV64ZVE32F-NEXT:    slli t5, t5, 3
+; RV64ZVE32F-NEXT:    add a0, a1, t5
 ; RV64ZVE32F-NEXT:    sd t3, 0(a0)
 ; RV64ZVE32F-NEXT:    andi a0, a7, 16
 ; RV64ZVE32F-NEXT:    beqz a0, .LBB51_5
 ; RV64ZVE32F-NEXT:  .LBB51_14: # %cond.store7
-; RV64ZVE32F-NEXT:    slli a0, t4, 3
-; RV64ZVE32F-NEXT:    add a0, a1, a0
+; RV64ZVE32F-NEXT:    slli t4, t4, 3
+; RV64ZVE32F-NEXT:    add a0, a1, t4
 ; RV64ZVE32F-NEXT:    sd t1, 0(a0)
 ; RV64ZVE32F-NEXT:    andi a0, a7, 32
 ; RV64ZVE32F-NEXT:    beqz a0, .LBB51_6
 ; RV64ZVE32F-NEXT:  .LBB51_15: # %cond.store9
-; RV64ZVE32F-NEXT:    slli a0, t2, 3
-; RV64ZVE32F-NEXT:    add a0, a1, a0
+; RV64ZVE32F-NEXT:    slli t2, t2, 3
+; RV64ZVE32F-NEXT:    add a0, a1, t2
 ; RV64ZVE32F-NEXT:    sd a6, 0(a0)
 ; RV64ZVE32F-NEXT:    andi a0, a7, 64
 ; RV64ZVE32F-NEXT:    beqz a0, .LBB51_7
 ; RV64ZVE32F-NEXT:  .LBB51_16: # %cond.store11
-; RV64ZVE32F-NEXT:    slli a0, t0, 3
-; RV64ZVE32F-NEXT:    add a0, a1, a0
+; RV64ZVE32F-NEXT:    slli t0, t0, 3
+; RV64ZVE32F-NEXT:    add a0, a1, t0
 ; RV64ZVE32F-NEXT:    sd a4, 0(a0)
 ; RV64ZVE32F-NEXT:    andi a0, a7, -128
 ; RV64ZVE32F-NEXT:    bnez a0, .LBB51_8
@@ -10701,44 +10701,44 @@ define void @mscatter_baseidx_v8f64(<8 x double> %val, double* %base, <8 x i64>
 ; RV64ZVE32F-NEXT:    andi a1, a3, 2
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB90_2
 ; RV64ZVE32F-NEXT:  .LBB90_10: # %cond.store1
-; RV64ZVE32F-NEXT:    slli a1, t1, 3
-; RV64ZVE32F-NEXT:    add a1, a0, a1
+; RV64ZVE32F-NEXT:    slli t1, t1, 3
+; RV64ZVE32F-NEXT:    add a1, a0, t1
 ; RV64ZVE32F-NEXT:    fsd fa1, 0(a1)
 ; RV64ZVE32F-NEXT:    andi a1, a3, 4
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB90_3
 ; RV64ZVE32F-NEXT:  .LBB90_11: # %cond.store3
-; RV64ZVE32F-NEXT:    slli a1, t0, 3
-; RV64ZVE32F-NEXT:    add a1, a0, a1
+; RV64ZVE32F-NEXT:    slli t0, t0, 3
+; RV64ZVE32F-NEXT:    add a1, a0, t0
 ; RV64ZVE32F-NEXT:    fsd fa2, 0(a1)
 ; RV64ZVE32F-NEXT:    andi a1, a3, 8
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB90_4
 ; RV64ZVE32F-NEXT:  .LBB90_12: # %cond.store5
-; RV64ZVE32F-NEXT:    slli a1, a7, 3
-; RV64ZVE32F-NEXT:    add a1, a0, a1
+; RV64ZVE32F-NEXT:    slli a7, a7, 3
+; RV64ZVE32F-NEXT:    add a1, a0, a7
 ; RV64ZVE32F-NEXT:    fsd fa3, 0(a1)
 ; RV64ZVE32F-NEXT:    andi a1, a3, 16
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB90_5
 ; RV64ZVE32F-NEXT:  .LBB90_13: # %cond.store7
-; RV64ZVE32F-NEXT:    slli a1, a6, 3
-; RV64ZVE32F-NEXT:    add a1, a0, a1
+; RV64ZVE32F-NEXT:    slli a6, a6, 3
+; RV64ZVE32F-NEXT:    add a1, a0, a6
 ; RV64ZVE32F-NEXT:    fsd fa4, 0(a1)
 ; RV64ZVE32F-NEXT:    andi a1, a3, 32
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB90_6
 ; RV64ZVE32F-NEXT:  .LBB90_14: # %cond.store9
-; RV64ZVE32F-NEXT:    slli a1, a5, 3
-; RV64ZVE32F-NEXT:    add a1, a0, a1
+; RV64ZVE32F-NEXT:    slli a5, a5, 3
+; RV64ZVE32F-NEXT:    add a1, a0, a5
 ; RV64ZVE32F-NEXT:    fsd fa5, 0(a1)
 ; RV64ZVE32F-NEXT:    andi a1, a3, 64
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB90_7
 ; RV64ZVE32F-NEXT:  .LBB90_15: # %cond.store11
-; RV64ZVE32F-NEXT:    slli a1, a4, 3
-; RV64ZVE32F-NEXT:    add a1, a0, a1
+; RV64ZVE32F-NEXT:    slli a4, a4, 3
+; RV64ZVE32F-NEXT:    add a1, a0, a4
 ; RV64ZVE32F-NEXT:    fsd fa6, 0(a1)
 ; RV64ZVE32F-NEXT:    andi a1, a3, -128
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB90_8
 ; RV64ZVE32F-NEXT:  .LBB90_16: # %cond.store13
-; RV64ZVE32F-NEXT:    slli a1, a2, 3
-; RV64ZVE32F-NEXT:    add a0, a0, a1
+; RV64ZVE32F-NEXT:    slli a2, a2, 3
+; RV64ZVE32F-NEXT:    add a0, a0, a2
 ; RV64ZVE32F-NEXT:    fsd fa7, 0(a0)
 ; RV64ZVE32F-NEXT:    ret
   %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %idxs

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
index ad311b52015ff..39f40b27ab0b1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -423,9 +423,9 @@ define void @masked_load_v2i32_align1(<2 x i32>* %a, <2 x i32> %m, <2 x i32>* %r
 ; RV32-NEXT:    lbu a6, 3(a0)
 ; RV32-NEXT:    slli a3, a3, 8
 ; RV32-NEXT:    or a3, a3, a4
-; RV32-NEXT:    slli a4, a5, 16
-; RV32-NEXT:    slli a5, a6, 24
-; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    slli a5, a5, 16
+; RV32-NEXT:    slli a6, a6, 24
+; RV32-NEXT:    or a4, a6, a5
 ; RV32-NEXT:    or a3, a4, a3
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; RV32-NEXT:    vmv.v.x v8, a3
@@ -444,9 +444,9 @@ define void @masked_load_v2i32_align1(<2 x i32>* %a, <2 x i32> %m, <2 x i32>* %r
 ; RV32-NEXT:    lbu a0, 7(a0)
 ; RV32-NEXT:    slli a2, a2, 8
 ; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a3, a4, 16
+; RV32-NEXT:    slli a4, a4, 16
 ; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    or a0, a0, a4
 ; RV32-NEXT:    or a0, a0, a2
 ; RV32-NEXT:    vmv.s.x v9, a0
 ; RV32-NEXT:    vsetvli zero, zero, e32, mf2, tu, ma
@@ -471,9 +471,9 @@ define void @masked_load_v2i32_align1(<2 x i32>* %a, <2 x i32> %m, <2 x i32>* %r
 ; RV64-NEXT:    lb a6, 3(a0)
 ; RV64-NEXT:    slli a3, a3, 8
 ; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a4, a5, 16
-; RV64-NEXT:    slli a5, a6, 24
-; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
 ; RV64-NEXT:    or a3, a4, a3
 ; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; RV64-NEXT:    vmv.v.x v8, a3
@@ -492,9 +492,9 @@ define void @masked_load_v2i32_align1(<2 x i32>* %a, <2 x i32> %m, <2 x i32>* %r
 ; RV64-NEXT:    lb a0, 7(a0)
 ; RV64-NEXT:    slli a2, a2, 8
 ; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a3, a4, 16
+; RV64-NEXT:    slli a4, a4, 16
 ; RV64-NEXT:    slli a0, a0, 24
-; RV64-NEXT:    or a0, a0, a3
+; RV64-NEXT:    or a0, a0, a4
 ; RV64-NEXT:    or a0, a0, a2
 ; RV64-NEXT:    vmv.s.x v9, a0
 ; RV64-NEXT:    vsetvli zero, zero, e32, mf2, tu, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
index 46b8085d7fe77..0e40c22e00cf6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
@@ -659,8 +659,8 @@ define <vscale x 1 x i64> @insertelt_nxv1i64_idx(<vscale x 1 x i64> %v, i64 %elt
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    slli a0, a1, 32
-; CHECK-NEXT:    srli a0, a0, 32
+; CHECK-NEXT:    slli a1, a1, 32
+; CHECK-NEXT:    srli a0, a1, 32
 ; CHECK-NEXT:    addi a1, a0, 1
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
@@ -696,8 +696,8 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_idx(<vscale x 2 x i64> %v, i64 %elt
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    slli a0, a1, 32
-; CHECK-NEXT:    srli a0, a0, 32
+; CHECK-NEXT:    slli a1, a1, 32
+; CHECK-NEXT:    srli a0, a1, 32
 ; CHECK-NEXT:    addi a1, a0, 1
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
@@ -733,8 +733,8 @@ define <vscale x 4 x i64> @insertelt_nxv4i64_idx(<vscale x 4 x i64> %v, i64 %elt
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v12, a0
-; CHECK-NEXT:    slli a0, a1, 32
-; CHECK-NEXT:    srli a0, a0, 32
+; CHECK-NEXT:    slli a1, a1, 32
+; CHECK-NEXT:    srli a0, a1, 32
 ; CHECK-NEXT:    addi a1, a0, 1
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
@@ -770,8 +770,8 @@ define <vscale x 8 x i64> @insertelt_nxv8i64_idx(<vscale x 8 x i64> %v, i64 %elt
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v16, a0
-; CHECK-NEXT:    slli a0, a1, 32
-; CHECK-NEXT:    srli a0, a0, 32
+; CHECK-NEXT:    slli a1, a1, 32
+; CHECK-NEXT:    srli a0, a1, 32
 ; CHECK-NEXT:    addi a1, a0, 1
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index 9319bd4a5a081..3aa4febc499c7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -2174,9 +2174,9 @@ define void @sink_splat_fma_scalable(float* noalias nocapture %a, float* noalias
 ; CHECK-NEXT:    beqz a5, .LBB34_7
 ; CHECK-NEXT:  .LBB34_5: # %for.body.preheader
 ; CHECK-NEXT:    addi a2, a4, -1024
-; CHECK-NEXT:    slli a3, a4, 2
-; CHECK-NEXT:    add a1, a1, a3
-; CHECK-NEXT:    add a0, a0, a3
+; CHECK-NEXT:    slli a4, a4, 2
+; CHECK-NEXT:    add a1, a1, a4
+; CHECK-NEXT:    add a0, a0, a4
 ; CHECK-NEXT:  .LBB34_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    flw ft0, 0(a0)
@@ -2277,9 +2277,9 @@ define void @sink_splat_fma_commute_scalable(float* noalias nocapture %a, float*
 ; CHECK-NEXT:    beqz a5, .LBB35_7
 ; CHECK-NEXT:  .LBB35_5: # %for.body.preheader
 ; CHECK-NEXT:    addi a2, a4, -1024
-; CHECK-NEXT:    slli a3, a4, 2
-; CHECK-NEXT:    add a1, a1, a3
-; CHECK-NEXT:    add a0, a0, a3
+; CHECK-NEXT:    slli a4, a4, 2
+; CHECK-NEXT:    add a1, a1, a4
+; CHECK-NEXT:    add a0, a0, a4
 ; CHECK-NEXT:  .LBB35_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    flw ft0, 0(a0)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
index df8183c0a2b58..1b729921113b1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
@@ -446,8 +446,8 @@ define void @vpstore_nxv17f64(<vscale x 17 x double> %val, <vscale x 17 x double
 ; CHECK-NEXT:    srli a2, a3, 2
 ; CHECK-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v0, v24, a2
-; CHECK-NEXT:    slli a2, a3, 4
-; CHECK-NEXT:    add a1, a1, a2
+; CHECK-NEXT:    slli a3, a3, 4
+; CHECK-NEXT:    add a1, a1, a3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    addi a0, sp, 16
 ; CHECK-NEXT:    vl8re8.v v8, (a0) # Unknown-size Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
index dae2708bc5858..8aabd6addea5e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
@@ -93,11 +93,11 @@ declare half @llvm.vp.reduce.fadd.nxv64f16(half, <vscale x 64 x half>, <vscale x
 define half @vpreduce_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_fadd_nxv64f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    srli a2, a1, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a1, a2, 1
 ; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v24, v0, a2
-; CHECK-NEXT:    slli a2, a1, 2
+; CHECK-NEXT:    vslidedown.vx v24, v0, a1
+; CHECK-NEXT:    slli a2, a2, 2
 ; CHECK-NEXT:    sub a1, a0, a2
 ; CHECK-NEXT:    sltu a3, a0, a1
 ; CHECK-NEXT:    addi a3, a3, -1
@@ -125,11 +125,11 @@ define half @vpreduce_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscale x
 define half @vpreduce_ord_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv64f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    srli a2, a1, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a1, a2, 1
 ; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v24, v0, a2
-; CHECK-NEXT:    slli a2, a1, 2
+; CHECK-NEXT:    vslidedown.vx v24, v0, a1
+; CHECK-NEXT:    slli a2, a2, 2
 ; CHECK-NEXT:    sub a1, a0, a2
 ; CHECK-NEXT:    sltu a3, a0, a1
 ; CHECK-NEXT:    addi a3, a3, -1

diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
index 731a226eed6a8..6457f0afe49b1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
@@ -1153,11 +1153,11 @@ declare i32 @llvm.vp.reduce.umax.nxv32i32(i32, <vscale x 32 x i32>, <vscale x 32
 define signext i32 @vpreduce_umax_nxv32i32(i32 signext %s, <vscale x 32 x i32> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpreduce_umax_nxv32i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    csrr a2, vlenb
-; RV32-NEXT:    srli a3, a2, 2
+; RV32-NEXT:    csrr a3, vlenb
+; RV32-NEXT:    srli a2, a3, 2
 ; RV32-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; RV32-NEXT:    vslidedown.vx v24, v0, a3
-; RV32-NEXT:    slli a3, a2, 1
+; RV32-NEXT:    vslidedown.vx v24, v0, a2
+; RV32-NEXT:    slli a3, a3, 1
 ; RV32-NEXT:    sub a2, a1, a3
 ; RV32-NEXT:    sltu a4, a1, a2
 ; RV32-NEXT:    addi a4, a4, -1

diff --git a/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll b/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
index 653679c4bfc13..0219ab1f241b6 100644
--- a/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
+++ b/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
@@ -64,12 +64,12 @@ define i128 @cmovcc128(i64 signext %a, i128 %b, i128 %c) nounwind {
 ; RV32I-NEXT:    lw a6, 0(a6)
 ; RV32I-NEXT:    beqz a1, .LBB1_9
 ; RV32I-NEXT:  # %bb.8: # %entry
-; RV32I-NEXT:    addi a1, a4, 12
+; RV32I-NEXT:    addi a3, a4, 12
 ; RV32I-NEXT:    j .LBB1_10
 ; RV32I-NEXT:  .LBB1_9:
-; RV32I-NEXT:    addi a1, a3, 12
+; RV32I-NEXT:    addi a3, a3, 12
 ; RV32I-NEXT:  .LBB1_10: # %entry
-; RV32I-NEXT:    lw a1, 0(a1)
+; RV32I-NEXT:    lw a1, 0(a3)
 ; RV32I-NEXT:    sw a1, 12(a0)
 ; RV32I-NEXT:    sw a6, 8(a0)
 ; RV32I-NEXT:    sw a5, 4(a0)
@@ -147,12 +147,12 @@ define i128 @cmov128(i1 %a, i128 %b, i128 %c) nounwind {
 ; RV32I-NEXT:    lw a6, 0(a6)
 ; RV32I-NEXT:    bnez a1, .LBB3_9
 ; RV32I-NEXT:  # %bb.8: # %entry
-; RV32I-NEXT:    addi a1, a3, 12
+; RV32I-NEXT:    addi a2, a3, 12
 ; RV32I-NEXT:    j .LBB3_10
 ; RV32I-NEXT:  .LBB3_9:
-; RV32I-NEXT:    addi a1, a2, 12
+; RV32I-NEXT:    addi a2, a2, 12
 ; RV32I-NEXT:  .LBB3_10: # %entry
-; RV32I-NEXT:    lw a1, 0(a1)
+; RV32I-NEXT:    lw a1, 0(a2)
 ; RV32I-NEXT:    sw a1, 12(a0)
 ; RV32I-NEXT:    sw a6, 8(a0)
 ; RV32I-NEXT:    sw a5, 4(a0)

diff --git a/llvm/test/CodeGen/RISCV/shifts.ll b/llvm/test/CodeGen/RISCV/shifts.ll
index e1c36448fce12..4b33c18f9ff72 100644
--- a/llvm/test/CodeGen/RISCV/shifts.ll
+++ b/llvm/test/CodeGen/RISCV/shifts.ll
@@ -563,8 +563,8 @@ define i64 @fshr64_minsize(i64 %a, i64 %b) minsize nounwind {
 ; RV32I-NEXT:    sll a0, a0, a5
 ; RV32I-NEXT:    or a0, a0, a4
 ; RV32I-NEXT:    srl a1, a1, a2
-; RV32I-NEXT:    slli a2, a3, 1
-; RV32I-NEXT:    sll a2, a2, a5
+; RV32I-NEXT:    slli a3, a3, 1
+; RV32I-NEXT:    sll a2, a3, a5
 ; RV32I-NEXT:    or a1, a2, a1
 ; RV32I-NEXT:    ret
 ;
@@ -631,8 +631,8 @@ define i128 @fshr128_minsize(i128 %a, i128 %b) minsize nounwind {
 ; RV32I-NEXT:    sll a4, a4, t1
 ; RV32I-NEXT:    or a3, a4, a3
 ; RV32I-NEXT:    srl a1, a1, a2
-; RV32I-NEXT:    slli a2, a5, 1
-; RV32I-NEXT:    sll a2, a2, t1
+; RV32I-NEXT:    slli a5, a5, 1
+; RV32I-NEXT:    sll a2, a5, t1
 ; RV32I-NEXT:    or a1, a2, a1
 ; RV32I-NEXT:    sw a1, 12(a0)
 ; RV32I-NEXT:    sw a3, 8(a0)
@@ -665,8 +665,8 @@ define i128 @fshr128_minsize(i128 %a, i128 %b) minsize nounwind {
 ; RV64I-NEXT:    sll a0, a0, a5
 ; RV64I-NEXT:    or a0, a0, a4
 ; RV64I-NEXT:    srl a1, a1, a2
-; RV64I-NEXT:    slli a2, a3, 1
-; RV64I-NEXT:    sll a2, a2, a5
+; RV64I-NEXT:    slli a3, a3, 1
+; RV64I-NEXT:    sll a2, a3, a5
 ; RV64I-NEXT:    or a1, a2, a1
 ; RV64I-NEXT:    ret
   %res = tail call i128 @llvm.fshr.i128(i128 %a, i128 %a, i128 %b)

diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 819b5602ea0f4..b2aa4f7d97e38 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -325,8 +325,8 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32-NEXT:    srli a0, a0, 1
 ; RV32-NEXT:    or a0, a0, a1
 ; RV32-NEXT:    lw s4, 0(s0)
-; RV32-NEXT:    slli a1, a2, 30
-; RV32-NEXT:    srli a1, a1, 31
+; RV32-NEXT:    slli a2, a2, 30
+; RV32-NEXT:    srli a1, a2, 31
 ; RV32-NEXT:    neg a1, a1
 ; RV32-NEXT:    li a2, 7
 ; RV32-NEXT:    li a3, 0
@@ -353,21 +353,21 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32-NEXT:    seqz a3, a3
 ; RV32-NEXT:    or a0, a0, a1
 ; RV32-NEXT:    snez a0, a0
-; RV32-NEXT:    addi a1, a3, -1
+; RV32-NEXT:    addi a3, a3, -1
 ; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    neg a3, a0
-; RV32-NEXT:    sw a3, 0(s0)
-; RV32-NEXT:    andi a3, a2, 7
-; RV32-NEXT:    sb a3, 12(s0)
-; RV32-NEXT:    slli a3, a1, 1
-; RV32-NEXT:    or a0, a3, a0
+; RV32-NEXT:    neg a1, a0
+; RV32-NEXT:    sw a1, 0(s0)
+; RV32-NEXT:    andi a1, a2, 7
+; RV32-NEXT:    sb a1, 12(s0)
+; RV32-NEXT:    slli a1, a3, 1
+; RV32-NEXT:    or a0, a1, a0
 ; RV32-NEXT:    sw a0, 4(s0)
-; RV32-NEXT:    srli a0, a1, 31
-; RV32-NEXT:    andi a1, a1, 1
+; RV32-NEXT:    srli a0, a3, 31
+; RV32-NEXT:    andi a1, a3, 1
 ; RV32-NEXT:    slli a1, a1, 1
 ; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    slli a1, a2, 2
-; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    slli a2, a2, 2
+; RV32-NEXT:    or a0, a0, a2
 ; RV32-NEXT:    sw a0, 8(s0)
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -400,8 +400,8 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64-NEXT:    slli a1, a1, 62
 ; RV64-NEXT:    or a0, a1, a0
 ; RV64-NEXT:    srai a0, a0, 31
-; RV64-NEXT:    slli a1, a2, 31
-; RV64-NEXT:    srai s2, a1, 31
+; RV64-NEXT:    slli a2, a2, 31
+; RV64-NEXT:    srai s2, a2, 31
 ; RV64-NEXT:    li a1, 7
 ; RV64-NEXT:    call __moddi3@plt
 ; RV64-NEXT:    mv s3, a0
@@ -420,10 +420,10 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64-NEXT:    srli a0, a0, 1
 ; RV64-NEXT:    or a0, a0, a2
 ; RV64-NEXT:    sltu a0, a1, a0
-; RV64-NEXT:    addi a1, s1, -2
-; RV64-NEXT:    seqz a1, a1
-; RV64-NEXT:    addi a2, s3, -1
-; RV64-NEXT:    seqz a2, a2
+; RV64-NEXT:    addi s1, s1, -2
+; RV64-NEXT:    seqz a1, s1
+; RV64-NEXT:    addi s3, s3, -1
+; RV64-NEXT:    seqz a2, s3
 ; RV64-NEXT:    neg a0, a0
 ; RV64-NEXT:    addi a2, a2, -1
 ; RV64-NEXT:    addi a1, a1, -1
@@ -437,8 +437,8 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64-NEXT:    sb a1, 12(s0)
 ; RV64-NEXT:    slli a0, a0, 31
 ; RV64-NEXT:    srli a0, a0, 31
-; RV64-NEXT:    slli a1, a2, 33
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    slli a2, a2, 33
+; RV64-NEXT:    or a0, a0, a2
 ; RV64-NEXT:    sd a0, 0(s0)
 ; RV64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -475,8 +475,8 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32M-NEXT:    srli a0, a0, 1
 ; RV32M-NEXT:    or a0, a0, a1
 ; RV32M-NEXT:    lw s4, 0(s0)
-; RV32M-NEXT:    slli a1, a2, 30
-; RV32M-NEXT:    srli a1, a1, 31
+; RV32M-NEXT:    slli a2, a2, 30
+; RV32M-NEXT:    srli a1, a2, 31
 ; RV32M-NEXT:    neg a1, a1
 ; RV32M-NEXT:    li a2, 7
 ; RV32M-NEXT:    li a3, 0
@@ -503,21 +503,21 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32M-NEXT:    seqz a3, a3
 ; RV32M-NEXT:    or a0, a0, a1
 ; RV32M-NEXT:    snez a0, a0
-; RV32M-NEXT:    addi a1, a3, -1
+; RV32M-NEXT:    addi a3, a3, -1
 ; RV32M-NEXT:    addi a2, a2, -1
-; RV32M-NEXT:    neg a3, a0
-; RV32M-NEXT:    sw a3, 0(s0)
-; RV32M-NEXT:    andi a3, a2, 7
-; RV32M-NEXT:    sb a3, 12(s0)
-; RV32M-NEXT:    slli a3, a1, 1
-; RV32M-NEXT:    or a0, a3, a0
+; RV32M-NEXT:    neg a1, a0
+; RV32M-NEXT:    sw a1, 0(s0)
+; RV32M-NEXT:    andi a1, a2, 7
+; RV32M-NEXT:    sb a1, 12(s0)
+; RV32M-NEXT:    slli a1, a3, 1
+; RV32M-NEXT:    or a0, a1, a0
 ; RV32M-NEXT:    sw a0, 4(s0)
-; RV32M-NEXT:    srli a0, a1, 31
-; RV32M-NEXT:    andi a1, a1, 1
+; RV32M-NEXT:    srli a0, a3, 31
+; RV32M-NEXT:    andi a1, a3, 1
 ; RV32M-NEXT:    slli a1, a1, 1
 ; RV32M-NEXT:    or a0, a0, a1
-; RV32M-NEXT:    slli a1, a2, 2
-; RV32M-NEXT:    or a0, a0, a1
+; RV32M-NEXT:    slli a2, a2, 2
+; RV32M-NEXT:    or a0, a0, a2
 ; RV32M-NEXT:    sw a0, 8(s0)
 ; RV32M-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32M-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -590,8 +590,8 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64M-NEXT:    srli a3, a3, 62
 ; RV64M-NEXT:    or a1, a3, a1
 ; RV64M-NEXT:    sw a1, 8(a0)
-; RV64M-NEXT:    slli a1, a2, 29
-; RV64M-NEXT:    srli a1, a1, 61
+; RV64M-NEXT:    slli a2, a2, 29
+; RV64M-NEXT:    srli a1, a2, 61
 ; RV64M-NEXT:    sb a1, 12(a0)
 ; RV64M-NEXT:    ret
 ;
@@ -620,8 +620,8 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32MV-NEXT:    slli a3, a2, 30
 ; RV32MV-NEXT:    srli a0, a0, 2
 ; RV32MV-NEXT:    or s5, a0, a3
-; RV32MV-NEXT:    slli a0, a2, 29
-; RV32MV-NEXT:    srli a2, a0, 31
+; RV32MV-NEXT:    slli a2, a2, 29
+; RV32MV-NEXT:    srli a2, a2, 31
 ; RV32MV-NEXT:    lw a0, 0(s2)
 ; RV32MV-NEXT:    neg s6, a2
 ; RV32MV-NEXT:    andi a1, a1, 1
@@ -779,10 +779,10 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64MV-NEXT:    slli a4, a3, 33
 ; RV64MV-NEXT:    or a1, a1, a4
 ; RV64MV-NEXT:    sd a1, 0(a0)
-; RV64MV-NEXT:    slli a1, a2, 2
-; RV64MV-NEXT:    slli a2, a3, 31
-; RV64MV-NEXT:    srli a2, a2, 62
-; RV64MV-NEXT:    or a1, a2, a1
+; RV64MV-NEXT:    slli a2, a2, 2
+; RV64MV-NEXT:    slli a3, a3, 31
+; RV64MV-NEXT:    srli a1, a3, 62
+; RV64MV-NEXT:    or a1, a1, a2
 ; RV64MV-NEXT:    sw a1, 8(a0)
 ; RV64MV-NEXT:    addi sp, s0, -64
 ; RV64MV-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
index cf657bea4187c..ce45c115b3659 100644
--- a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
@@ -68,9 +68,9 @@ define i32 @load_i32(i32* %p) {
 ; RV32I-NEXT:    lbu a0, 3(a0)
 ; RV32I-NEXT:    slli a1, a1, 8
 ; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    slli a2, a3, 16
+; RV32I-NEXT:    slli a3, a3, 16
 ; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    or a0, a0, a3
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -82,9 +82,9 @@ define i32 @load_i32(i32* %p) {
 ; RV64I-NEXT:    lb a0, 3(a0)
 ; RV64I-NEXT:    slli a1, a1, 8
 ; RV64I-NEXT:    or a1, a1, a2
-; RV64I-NEXT:    slli a2, a3, 16
+; RV64I-NEXT:    slli a3, a3, 16
 ; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    or a0, a0, a3
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -105,9 +105,9 @@ define i64 @load_i64(i64* %p) {
 ; RV32I-NEXT:    lbu a4, 3(a0)
 ; RV32I-NEXT:    slli a1, a1, 8
 ; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    slli a2, a3, 16
-; RV32I-NEXT:    slli a3, a4, 24
-; RV32I-NEXT:    or a2, a3, a2
+; RV32I-NEXT:    slli a3, a3, 16
+; RV32I-NEXT:    slli a4, a4, 24
+; RV32I-NEXT:    or a2, a4, a3
 ; RV32I-NEXT:    or a2, a2, a1
 ; RV32I-NEXT:    lbu a1, 5(a0)
 ; RV32I-NEXT:    lbu a3, 4(a0)
@@ -115,9 +115,9 @@ define i64 @load_i64(i64* %p) {
 ; RV32I-NEXT:    lbu a0, 7(a0)
 ; RV32I-NEXT:    slli a1, a1, 8
 ; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    slli a3, a4, 16
+; RV32I-NEXT:    slli a4, a4, 16
 ; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    or a0, a0, a4
 ; RV32I-NEXT:    or a1, a0, a1
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    ret
@@ -130,9 +130,9 @@ define i64 @load_i64(i64* %p) {
 ; RV64I-NEXT:    lbu a4, 3(a0)
 ; RV64I-NEXT:    slli a1, a1, 8
 ; RV64I-NEXT:    or a1, a1, a2
-; RV64I-NEXT:    slli a2, a3, 16
-; RV64I-NEXT:    slli a3, a4, 24
-; RV64I-NEXT:    or a2, a3, a2
+; RV64I-NEXT:    slli a3, a3, 16
+; RV64I-NEXT:    slli a4, a4, 24
+; RV64I-NEXT:    or a2, a4, a3
 ; RV64I-NEXT:    or a1, a2, a1
 ; RV64I-NEXT:    lbu a2, 5(a0)
 ; RV64I-NEXT:    lbu a3, 4(a0)
@@ -140,9 +140,9 @@ define i64 @load_i64(i64* %p) {
 ; RV64I-NEXT:    lbu a0, 7(a0)
 ; RV64I-NEXT:    slli a2, a2, 8
 ; RV64I-NEXT:    or a2, a2, a3
-; RV64I-NEXT:    slli a3, a4, 16
+; RV64I-NEXT:    slli a4, a4, 16
 ; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    or a0, a0, a3
+; RV64I-NEXT:    or a0, a0, a4
 ; RV64I-NEXT:    or a0, a0, a2
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    or a0, a0, a1

diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
index 0efe655c06902..b5b1901d778b7 100644
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -357,18 +357,18 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV32-NEXT:    addi a0, a0, -1463
 ; RV32-NEXT:    andi a0, a0, 2047
 ; RV32-NEXT:    sltiu a0, a0, 293
-; RV32-NEXT:    addi a1, s3, -1
+; RV32-NEXT:    addi s3, s3, -1
 ; RV32-NEXT:    addi a0, a0, -1
-; RV32-NEXT:    addi a2, s1, -1
-; RV32-NEXT:    slli a3, a2, 21
-; RV32-NEXT:    srli a3, a3, 31
-; RV32-NEXT:    sb a3, 4(s0)
-; RV32-NEXT:    andi a1, a1, 2047
+; RV32-NEXT:    addi s1, s1, -1
+; RV32-NEXT:    slli a1, s1, 21
+; RV32-NEXT:    srli a1, a1, 31
+; RV32-NEXT:    sb a1, 4(s0)
+; RV32-NEXT:    andi a1, s3, 2047
 ; RV32-NEXT:    andi a0, a0, 2047
 ; RV32-NEXT:    slli a0, a0, 11
 ; RV32-NEXT:    or a0, a1, a0
-; RV32-NEXT:    slli a1, a2, 22
-; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    slli s1, s1, 22
+; RV32-NEXT:    or a0, a0, s1
 ; RV32-NEXT:    sw a0, 0(s0)
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -414,11 +414,11 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64-NEXT:    addiw a0, a0, -1638
 ; RV64-NEXT:    andi a0, a0, 2047
 ; RV64-NEXT:    sltiu a0, a0, 2
-; RV64-NEXT:    addiw a1, s3, -1
+; RV64-NEXT:    addiw s3, s3, -1
 ; RV64-NEXT:    addi a0, a0, -1
-; RV64-NEXT:    addiw a2, s2, -1
-; RV64-NEXT:    andi a1, a1, 2047
-; RV64-NEXT:    andi a2, a2, 2047
+; RV64-NEXT:    addiw s2, s2, -1
+; RV64-NEXT:    andi a1, s3, 2047
+; RV64-NEXT:    andi a2, s2, 2047
 ; RV64-NEXT:    slli a2, a2, 11
 ; RV64-NEXT:    or a1, a1, a2
 ; RV64-NEXT:    slli a0, a0, 22

diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll
index 123846092905c..3eda084804092 100644
--- a/llvm/test/CodeGen/RISCV/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/vararg.ll
@@ -459,8 +459,8 @@ define void @va1_caller() nounwind {
 ; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -16
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 1023
-; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a1, a0, 52
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a1, 1023
+; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a1, a1, 52
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    li a2, 2
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    call va1@plt
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -473,8 +473,8 @@ define void @va1_caller() nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 1023
-; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a1, a0, 52
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a1, 1023
+; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a1, a1, 52
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    li a2, 2
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    call va1@plt
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -775,8 +775,8 @@ define void @va2_caller() nounwind {
 ; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -16
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 1023
-; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a1, a0, 52
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a1, 1023
+; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a1, a1, 52
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    call va2@plt
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 16
@@ -788,8 +788,8 @@ define void @va2_caller() nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 1023
-; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a1, a0, 52
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a1, 1023
+; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a1, a1, 52
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    call va2@plt
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
@@ -1110,8 +1110,8 @@ define void @va3_caller() nounwind {
 ; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -16
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 1
-; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a2, a0, 62
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a2, 1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a2, a2, 62
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 2
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    li a1, 1111
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    call va3@plt
@@ -1125,8 +1125,8 @@ define void @va3_caller() nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 1
-; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a2, a0, 62
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a2, 1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a2, a2, 62
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 2
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    li a1, 1111
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    call va3@plt
@@ -1527,8 +1527,8 @@ define void @va5_aligned_stack_caller() nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    lui a0, %hi(.LCPI11_2)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ld a3, %lo(.LCPI11_2)(a0)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    lui a0, 2384
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addiw a0, a0, 761
-; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a6, a0, 11
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addiw a6, a0, 761
+; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a6, a6, 11
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 1
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    li a1, 11
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    li a4, 12
@@ -1559,8 +1559,8 @@ define void @va5_aligned_stack_caller() nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    lui a0, %hi(.LCPI11_2)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld a3, %lo(.LCPI11_2)(a0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    lui a0, 2384
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addiw a0, a0, 761
-; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a6, a0, 11
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addiw a6, a0, 761
+; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a6, a6, 11
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 1
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    li a1, 11
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    li a4, 12

diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
index f6963fd674d3e..b0641c73f0d9c 100644
--- a/llvm/test/CodeGen/RISCV/xaluo.ll
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -4018,9 +4018,9 @@ define zeroext i1 @uaddo.i64.constant_2048(i64 %v1, i64* %res) {
 ; RV32-LABEL: uaddo.i64.constant_2048:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    mv a3, a0
-; RV32-NEXT:    addi a0, a0, 2047
-; RV32-NEXT:    addi a4, a0, 1
-; RV32-NEXT:    sltu a0, a4, a3
+; RV32-NEXT:    addi a4, a0, 2047
+; RV32-NEXT:    addi a4, a4, 1
+; RV32-NEXT:    sltu a0, a4, a0
 ; RV32-NEXT:    add a5, a1, a0
 ; RV32-NEXT:    bgeu a4, a3, .LBB67_2
 ; RV32-NEXT:  # %bb.1: # %entry
@@ -4041,9 +4041,9 @@ define zeroext i1 @uaddo.i64.constant_2048(i64 %v1, i64* %res) {
 ; RV32ZBA-LABEL: uaddo.i64.constant_2048:
 ; RV32ZBA:       # %bb.0: # %entry
 ; RV32ZBA-NEXT:    mv a3, a0
-; RV32ZBA-NEXT:    addi a0, a0, 2047
-; RV32ZBA-NEXT:    addi a4, a0, 1
-; RV32ZBA-NEXT:    sltu a0, a4, a3
+; RV32ZBA-NEXT:    addi a4, a0, 2047
+; RV32ZBA-NEXT:    addi a4, a4, 1
+; RV32ZBA-NEXT:    sltu a0, a4, a0
 ; RV32ZBA-NEXT:    add a5, a1, a0
 ; RV32ZBA-NEXT:    bgeu a4, a3, .LBB67_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
@@ -4072,9 +4072,9 @@ define zeroext i1 @uaddo.i64.constant_2049(i64 %v1, i64* %res) {
 ; RV32-LABEL: uaddo.i64.constant_2049:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    mv a3, a0
-; RV32-NEXT:    addi a0, a0, 2047
-; RV32-NEXT:    addi a4, a0, 2
-; RV32-NEXT:    sltu a0, a4, a3
+; RV32-NEXT:    addi a4, a0, 2047
+; RV32-NEXT:    addi a4, a4, 2
+; RV32-NEXT:    sltu a0, a4, a0
 ; RV32-NEXT:    add a5, a1, a0
 ; RV32-NEXT:    bgeu a4, a3, .LBB68_2
 ; RV32-NEXT:  # %bb.1: # %entry
@@ -4095,9 +4095,9 @@ define zeroext i1 @uaddo.i64.constant_2049(i64 %v1, i64* %res) {
 ; RV32ZBA-LABEL: uaddo.i64.constant_2049:
 ; RV32ZBA:       # %bb.0: # %entry
 ; RV32ZBA-NEXT:    mv a3, a0
-; RV32ZBA-NEXT:    addi a0, a0, 2047
-; RV32ZBA-NEXT:    addi a4, a0, 2
-; RV32ZBA-NEXT:    sltu a0, a4, a3
+; RV32ZBA-NEXT:    addi a4, a0, 2047
+; RV32ZBA-NEXT:    addi a4, a4, 2
+; RV32ZBA-NEXT:    sltu a0, a4, a0
 ; RV32ZBA-NEXT:    add a5, a1, a0
 ; RV32ZBA-NEXT:    bgeu a4, a3, .LBB68_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry

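Across the hunks above the pattern is the same: operations such as "slli a1, a3, 1" become "slli a3, a3, 1" so that the destination register is also a source register. As a minimal hand-written sketch of the encoding difference this enables (assuming the C extension is present; the registers and immediates below are made up for illustration and are not taken from the patch or its tests):

  slli a1, a1, 3     # rd == rs1 and shamt fits in 6 bits: may assemble to 16-bit c.slli
  slli a1, a3, 3     # rd != rs1: needs the full 32-bit slli encoding
  addi a0, a0, 16    # rd == rs1 and imm fits in 6 bits: may assemble to 16-bit c.addi
  addi a0, a2, 16    # rd != rs1: needs the full 32-bit addi encoding

The surrounding loads, stores, and control flow in the checked output are largely untouched; the churn is almost entirely in which registers get picked.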

        


More information about the llvm-commits mailing list