[llvm] e00e20a - [RISCV] Add ADDW/AND/OR/XOR/SUB/SUBW to getRegAllocHints.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 1 11:09:47 PST 2022


Author: Craig Topper
Date: 2022-12-01T11:09:38-08:00
New Revision: e00e20a055fcd7386434f237784d845d5ddfc1b1

URL: https://github.com/llvm/llvm-project/commit/e00e20a055fcd7386434f237784d845d5ddfc1b1
DIFF: https://github.com/llvm/llvm-project/commit/e00e20a055fcd7386434f237784d845d5ddfc1b1.diff

LOG: [RISCV] Add ADDW/AND/OR/XOR/SUB/SUBW to getRegAllocHints.

These instructions require both register operands to be compressible,
so I've only applied the hint if we already have a GPRC physical register
assigned to the other register operand.
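
For example (an illustrative sketch, not part of this patch): the CA-format
compressed encodings c.and/c.or/c.xor/c.sub/c.addw/c.subw have 3-bit register
fields that can only name x8-x15 (s0-s1, a0-a5), and the destination must
match the first source:

    and a0, a0, a5    # a0 and a5 are both in GPRC -> compressible to c.and a0, a5
    and a0, a0, t0    # t0 (x5) is outside GPRC -> must remain a 4-byte and

so a GPRC hint for one register operand only pays off when the other register
operand is already in GPRC.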

Reviewed By: reames

Differential Revision: https://reviews.llvm.org/D139079

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
    llvm/test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
    llvm/test/CodeGen/RISCV/atomic-rmw.ll
    llvm/test/CodeGen/RISCV/atomic-signext.ll
    llvm/test/CodeGen/RISCV/branch-relaxation.ll
    llvm/test/CodeGen/RISCV/bswap-bitreverse.ll
    llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
    llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
    llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
    llvm/test/CodeGen/RISCV/div-by-constant.ll
    llvm/test/CodeGen/RISCV/div-pow2.ll
    llvm/test/CodeGen/RISCV/div.ll
    llvm/test/CodeGen/RISCV/double-arith-strict.ll
    llvm/test/CodeGen/RISCV/double-arith.ll
    llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
    llvm/test/CodeGen/RISCV/float-arith-strict.ll
    llvm/test/CodeGen/RISCV/float-arith.ll
    llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
    llvm/test/CodeGen/RISCV/float-convert.ll
    llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
    llvm/test/CodeGen/RISCV/forced-atomics.ll
    llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
    llvm/test/CodeGen/RISCV/half-convert.ll
    llvm/test/CodeGen/RISCV/half-fcmp-strict.ll
    llvm/test/CodeGen/RISCV/mul.ll
    llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
    llvm/test/CodeGen/RISCV/rv32zbb.ll
    llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll
    llvm/test/CodeGen/RISCV/rv64zbb.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
    llvm/test/CodeGen/RISCV/select-binop-identity.ll
    llvm/test/CodeGen/RISCV/setcc-logic.ll
    llvm/test/CodeGen/RISCV/shadowcallstack.ll
    llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
    llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
    llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
    llvm/test/CodeGen/RISCV/unaligned-load-store.ll
    llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll
    llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
    llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
    llvm/test/CodeGen/RISCV/xaluo.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index e134fb3f7bf6..1d5b18ad9db2 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -438,14 +438,21 @@ bool RISCVRegisterInfo::getRegAllocationHints(
     }
   };
 
-  // For now we support the compressible instructions which can encode all
-  // registers and have a single register source.
-  // TODO: Add more compressed instructions.
+  // These are all of the compressible binary instructions. If an instruction
+  // needs GPRC register class operands, \p NeedGPRC will be set to true.
   auto isCompressible = [](const MachineInstr &MI, bool &NeedGPRC) {
     NeedGPRC = false;
     switch (MI.getOpcode()) {
     default:
       return false;
+    case RISCV::AND:
+    case RISCV::OR:
+    case RISCV::XOR:
+    case RISCV::SUB:
+    case RISCV::ADDW:
+    case RISCV::SUBW:
+      NeedGPRC = true;
+      return true;
     case RISCV::ANDI:
       NeedGPRC = true;
       return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
@@ -462,18 +469,35 @@ bool RISCVRegisterInfo::getRegAllocationHints(
     }
   };
 
+  // Returns true if this operand is compressible. For non-registers it always
+  // returns true. The immediate range was already checked in isCompressible.
+  // For registers, it checks if the register is a GPRC register. Reg-reg
+  // instructions that require GPRC need all register operands to be GPRC.
+  auto isCompressibleOpnd = [&](const MachineOperand &MO) {
+    if (!MO.isReg())
+      return true;
+    Register Reg = MO.getReg();
+    Register PhysReg =
+        Register::isPhysicalRegister(Reg) ? Reg : Register(VRM->getPhys(Reg));
+    return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
+  };
+
   for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
     const MachineInstr &MI = *MO.getParent();
+    unsigned OpIdx = MI.getOperandNo(&MO);
     bool NeedGPRC;
     if (isCompressible(MI, NeedGPRC)) {
-      unsigned OpIdx = MI.getOperandNo(&MO);
       if (OpIdx == 0 && MI.getOperand(1).isReg()) {
-        tryAddHint(MO, MI.getOperand(1), NeedGPRC);
-        if (MI.isCommutable() && MI.getOperand(2).isReg())
+        if (!NeedGPRC || isCompressibleOpnd(MI.getOperand(2)))
+          tryAddHint(MO, MI.getOperand(1), NeedGPRC);
+        if (MI.isCommutable() && MI.getOperand(2).isReg() &&
+            (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
           tryAddHint(MO, MI.getOperand(2), NeedGPRC);
-      } else if (OpIdx == 1) {
+      } else if (OpIdx == 1 &&
+                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(2)))) {
         tryAddHint(MO, MI.getOperand(0), NeedGPRC);
-      } else if (MI.isCommutable() && OpIdx == 2) {
+      } else if (MI.isCommutable() && OpIdx == 2 &&
+                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
         tryAddHint(MO, MI.getOperand(0), NeedGPRC);
       }
     }

diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg-flag.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
index 082b92bb6698..9855edbc0c86 100644
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
@@ -18,8 +18,8 @@ define i1 @cmpxchg_i32_seq_cst_seq_cst(i32* %ptr, i32 signext %cmp,
 ; RV64IA-NEXT:    sc.w.aqrl a4, a2, (a0)
 ; RV64IA-NEXT:    bnez a4, .LBB0_1
 ; RV64IA-NEXT:  .LBB0_3: # %entry
-; RV64IA-NEXT:    xor a0, a3, a1
-; RV64IA-NEXT:    seqz a0, a0
+; RV64IA-NEXT:    xor a1, a3, a1
+; RV64IA-NEXT:    seqz a0, a1
 ; RV64IA-NEXT:    ret
         i32 signext %val) nounwind {
 entry:

diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
index c34ee47ec878..fc600360b948 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
@@ -6893,23 +6893,23 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
-; RV32IA-NEXT:    sub a3, a5, a3
+; RV32IA-NEXT:    sub a5, a5, a3
 ; RV32IA-NEXT:  .LBB90_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w a5, (a2)
-; RV32IA-NEXT:    and a7, a5, a4
-; RV32IA-NEXT:    mv a6, a5
-; RV32IA-NEXT:    sll a7, a7, a3
-; RV32IA-NEXT:    sra a7, a7, a3
+; RV32IA-NEXT:    lr.w a3, (a2)
+; RV32IA-NEXT:    and a7, a3, a4
+; RV32IA-NEXT:    mv a6, a3
+; RV32IA-NEXT:    sll a7, a7, a5
+; RV32IA-NEXT:    sra a7, a7, a5
 ; RV32IA-NEXT:    bge a7, a1, .LBB90_3
 ; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB90_1 Depth=1
-; RV32IA-NEXT:    xor a6, a5, a1
+; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
-; RV32IA-NEXT:    xor a6, a5, a6
+; RV32IA-NEXT:    xor a6, a3, a6
 ; RV32IA-NEXT:  .LBB90_3: # in Loop: Header=BB90_1 Depth=1
 ; RV32IA-NEXT:    sc.w a6, a6, (a2)
 ; RV32IA-NEXT:    bnez a6, .LBB90_1
 ; RV32IA-NEXT:  # %bb.4:
-; RV32IA-NEXT:    srl a0, a5, a0
+; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_max_i16_monotonic:
@@ -6966,23 +6966,23 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
-; RV64IA-NEXT:    sub a3, a5, a3
+; RV64IA-NEXT:    sub a5, a5, a3
 ; RV64IA-NEXT:  .LBB90_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w a5, (a2)
-; RV64IA-NEXT:    and a7, a5, a4
-; RV64IA-NEXT:    mv a6, a5
-; RV64IA-NEXT:    sll a7, a7, a3
-; RV64IA-NEXT:    sra a7, a7, a3
+; RV64IA-NEXT:    lr.w a3, (a2)
+; RV64IA-NEXT:    and a7, a3, a4
+; RV64IA-NEXT:    mv a6, a3
+; RV64IA-NEXT:    sll a7, a7, a5
+; RV64IA-NEXT:    sra a7, a7, a5
 ; RV64IA-NEXT:    bge a7, a1, .LBB90_3
 ; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB90_1 Depth=1
-; RV64IA-NEXT:    xor a6, a5, a1
+; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
-; RV64IA-NEXT:    xor a6, a5, a6
+; RV64IA-NEXT:    xor a6, a3, a6
 ; RV64IA-NEXT:  .LBB90_3: # in Loop: Header=BB90_1 Depth=1
 ; RV64IA-NEXT:    sc.w a6, a6, (a2)
 ; RV64IA-NEXT:    bnez a6, .LBB90_1
 ; RV64IA-NEXT:  # %bb.4:
-; RV64IA-NEXT:    srlw a0, a5, a0
+; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
   %1 = atomicrmw max i16* %a, i16 %b monotonic
   ret i16 %1
@@ -7043,23 +7043,23 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
-; RV32IA-NEXT:    sub a3, a5, a3
+; RV32IA-NEXT:    sub a5, a5, a3
 ; RV32IA-NEXT:  .LBB91_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w.aq a5, (a2)
-; RV32IA-NEXT:    and a7, a5, a4
-; RV32IA-NEXT:    mv a6, a5
-; RV32IA-NEXT:    sll a7, a7, a3
-; RV32IA-NEXT:    sra a7, a7, a3
+; RV32IA-NEXT:    lr.w.aq a3, (a2)
+; RV32IA-NEXT:    and a7, a3, a4
+; RV32IA-NEXT:    mv a6, a3
+; RV32IA-NEXT:    sll a7, a7, a5
+; RV32IA-NEXT:    sra a7, a7, a5
 ; RV32IA-NEXT:    bge a7, a1, .LBB91_3
 ; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB91_1 Depth=1
-; RV32IA-NEXT:    xor a6, a5, a1
+; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
-; RV32IA-NEXT:    xor a6, a5, a6
+; RV32IA-NEXT:    xor a6, a3, a6
 ; RV32IA-NEXT:  .LBB91_3: # in Loop: Header=BB91_1 Depth=1
 ; RV32IA-NEXT:    sc.w a6, a6, (a2)
 ; RV32IA-NEXT:    bnez a6, .LBB91_1
 ; RV32IA-NEXT:  # %bb.4:
-; RV32IA-NEXT:    srl a0, a5, a0
+; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_max_i16_acquire:
@@ -7116,23 +7116,23 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
-; RV64IA-NEXT:    sub a3, a5, a3
+; RV64IA-NEXT:    sub a5, a5, a3
 ; RV64IA-NEXT:  .LBB91_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w.aq a5, (a2)
-; RV64IA-NEXT:    and a7, a5, a4
-; RV64IA-NEXT:    mv a6, a5
-; RV64IA-NEXT:    sll a7, a7, a3
-; RV64IA-NEXT:    sra a7, a7, a3
+; RV64IA-NEXT:    lr.w.aq a3, (a2)
+; RV64IA-NEXT:    and a7, a3, a4
+; RV64IA-NEXT:    mv a6, a3
+; RV64IA-NEXT:    sll a7, a7, a5
+; RV64IA-NEXT:    sra a7, a7, a5
 ; RV64IA-NEXT:    bge a7, a1, .LBB91_3
 ; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB91_1 Depth=1
-; RV64IA-NEXT:    xor a6, a5, a1
+; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
-; RV64IA-NEXT:    xor a6, a5, a6
+; RV64IA-NEXT:    xor a6, a3, a6
 ; RV64IA-NEXT:  .LBB91_3: # in Loop: Header=BB91_1 Depth=1
 ; RV64IA-NEXT:    sc.w a6, a6, (a2)
 ; RV64IA-NEXT:    bnez a6, .LBB91_1
 ; RV64IA-NEXT:  # %bb.4:
-; RV64IA-NEXT:    srlw a0, a5, a0
+; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
   %1 = atomicrmw max i16* %a, i16 %b acquire
   ret i16 %1
@@ -7193,23 +7193,23 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
-; RV32IA-NEXT:    sub a3, a5, a3
+; RV32IA-NEXT:    sub a5, a5, a3
 ; RV32IA-NEXT:  .LBB92_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w a5, (a2)
-; RV32IA-NEXT:    and a7, a5, a4
-; RV32IA-NEXT:    mv a6, a5
-; RV32IA-NEXT:    sll a7, a7, a3
-; RV32IA-NEXT:    sra a7, a7, a3
+; RV32IA-NEXT:    lr.w a3, (a2)
+; RV32IA-NEXT:    and a7, a3, a4
+; RV32IA-NEXT:    mv a6, a3
+; RV32IA-NEXT:    sll a7, a7, a5
+; RV32IA-NEXT:    sra a7, a7, a5
 ; RV32IA-NEXT:    bge a7, a1, .LBB92_3
 ; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB92_1 Depth=1
-; RV32IA-NEXT:    xor a6, a5, a1
+; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
-; RV32IA-NEXT:    xor a6, a5, a6
+; RV32IA-NEXT:    xor a6, a3, a6
 ; RV32IA-NEXT:  .LBB92_3: # in Loop: Header=BB92_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a6, a6, (a2)
 ; RV32IA-NEXT:    bnez a6, .LBB92_1
 ; RV32IA-NEXT:  # %bb.4:
-; RV32IA-NEXT:    srl a0, a5, a0
+; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_max_i16_release:
@@ -7266,23 +7266,23 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
-; RV64IA-NEXT:    sub a3, a5, a3
+; RV64IA-NEXT:    sub a5, a5, a3
 ; RV64IA-NEXT:  .LBB92_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w a5, (a2)
-; RV64IA-NEXT:    and a7, a5, a4
-; RV64IA-NEXT:    mv a6, a5
-; RV64IA-NEXT:    sll a7, a7, a3
-; RV64IA-NEXT:    sra a7, a7, a3
+; RV64IA-NEXT:    lr.w a3, (a2)
+; RV64IA-NEXT:    and a7, a3, a4
+; RV64IA-NEXT:    mv a6, a3
+; RV64IA-NEXT:    sll a7, a7, a5
+; RV64IA-NEXT:    sra a7, a7, a5
 ; RV64IA-NEXT:    bge a7, a1, .LBB92_3
 ; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB92_1 Depth=1
-; RV64IA-NEXT:    xor a6, a5, a1
+; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
-; RV64IA-NEXT:    xor a6, a5, a6
+; RV64IA-NEXT:    xor a6, a3, a6
 ; RV64IA-NEXT:  .LBB92_3: # in Loop: Header=BB92_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a6, a6, (a2)
 ; RV64IA-NEXT:    bnez a6, .LBB92_1
 ; RV64IA-NEXT:  # %bb.4:
-; RV64IA-NEXT:    srlw a0, a5, a0
+; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
   %1 = atomicrmw max i16* %a, i16 %b release
   ret i16 %1
@@ -7343,23 +7343,23 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
-; RV32IA-NEXT:    sub a3, a5, a3
+; RV32IA-NEXT:    sub a5, a5, a3
 ; RV32IA-NEXT:  .LBB93_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w.aq a5, (a2)
-; RV32IA-NEXT:    and a7, a5, a4
-; RV32IA-NEXT:    mv a6, a5
-; RV32IA-NEXT:    sll a7, a7, a3
-; RV32IA-NEXT:    sra a7, a7, a3
+; RV32IA-NEXT:    lr.w.aq a3, (a2)
+; RV32IA-NEXT:    and a7, a3, a4
+; RV32IA-NEXT:    mv a6, a3
+; RV32IA-NEXT:    sll a7, a7, a5
+; RV32IA-NEXT:    sra a7, a7, a5
 ; RV32IA-NEXT:    bge a7, a1, .LBB93_3
 ; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB93_1 Depth=1
-; RV32IA-NEXT:    xor a6, a5, a1
+; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
-; RV32IA-NEXT:    xor a6, a5, a6
+; RV32IA-NEXT:    xor a6, a3, a6
 ; RV32IA-NEXT:  .LBB93_3: # in Loop: Header=BB93_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a6, a6, (a2)
 ; RV32IA-NEXT:    bnez a6, .LBB93_1
 ; RV32IA-NEXT:  # %bb.4:
-; RV32IA-NEXT:    srl a0, a5, a0
+; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_max_i16_acq_rel:
@@ -7416,23 +7416,23 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
-; RV64IA-NEXT:    sub a3, a5, a3
+; RV64IA-NEXT:    sub a5, a5, a3
 ; RV64IA-NEXT:  .LBB93_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w.aq a5, (a2)
-; RV64IA-NEXT:    and a7, a5, a4
-; RV64IA-NEXT:    mv a6, a5
-; RV64IA-NEXT:    sll a7, a7, a3
-; RV64IA-NEXT:    sra a7, a7, a3
+; RV64IA-NEXT:    lr.w.aq a3, (a2)
+; RV64IA-NEXT:    and a7, a3, a4
+; RV64IA-NEXT:    mv a6, a3
+; RV64IA-NEXT:    sll a7, a7, a5
+; RV64IA-NEXT:    sra a7, a7, a5
 ; RV64IA-NEXT:    bge a7, a1, .LBB93_3
 ; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB93_1 Depth=1
-; RV64IA-NEXT:    xor a6, a5, a1
+; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
-; RV64IA-NEXT:    xor a6, a5, a6
+; RV64IA-NEXT:    xor a6, a3, a6
 ; RV64IA-NEXT:  .LBB93_3: # in Loop: Header=BB93_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a6, a6, (a2)
 ; RV64IA-NEXT:    bnez a6, .LBB93_1
 ; RV64IA-NEXT:  # %bb.4:
-; RV64IA-NEXT:    srlw a0, a5, a0
+; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
   %1 = atomicrmw max i16* %a, i16 %b acq_rel
   ret i16 %1
@@ -7493,23 +7493,23 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
-; RV32IA-NEXT:    sub a3, a5, a3
+; RV32IA-NEXT:    sub a5, a5, a3
 ; RV32IA-NEXT:  .LBB94_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w.aqrl a5, (a2)
-; RV32IA-NEXT:    and a7, a5, a4
-; RV32IA-NEXT:    mv a6, a5
-; RV32IA-NEXT:    sll a7, a7, a3
-; RV32IA-NEXT:    sra a7, a7, a3
+; RV32IA-NEXT:    lr.w.aqrl a3, (a2)
+; RV32IA-NEXT:    and a7, a3, a4
+; RV32IA-NEXT:    mv a6, a3
+; RV32IA-NEXT:    sll a7, a7, a5
+; RV32IA-NEXT:    sra a7, a7, a5
 ; RV32IA-NEXT:    bge a7, a1, .LBB94_3
 ; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB94_1 Depth=1
-; RV32IA-NEXT:    xor a6, a5, a1
+; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
-; RV32IA-NEXT:    xor a6, a5, a6
+; RV32IA-NEXT:    xor a6, a3, a6
 ; RV32IA-NEXT:  .LBB94_3: # in Loop: Header=BB94_1 Depth=1
 ; RV32IA-NEXT:    sc.w.aqrl a6, a6, (a2)
 ; RV32IA-NEXT:    bnez a6, .LBB94_1
 ; RV32IA-NEXT:  # %bb.4:
-; RV32IA-NEXT:    srl a0, a5, a0
+; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_max_i16_seq_cst:
@@ -7566,23 +7566,23 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
-; RV64IA-NEXT:    sub a3, a5, a3
+; RV64IA-NEXT:    sub a5, a5, a3
 ; RV64IA-NEXT:  .LBB94_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w.aqrl a5, (a2)
-; RV64IA-NEXT:    and a7, a5, a4
-; RV64IA-NEXT:    mv a6, a5
-; RV64IA-NEXT:    sll a7, a7, a3
-; RV64IA-NEXT:    sra a7, a7, a3
+; RV64IA-NEXT:    lr.w.aqrl a3, (a2)
+; RV64IA-NEXT:    and a7, a3, a4
+; RV64IA-NEXT:    mv a6, a3
+; RV64IA-NEXT:    sll a7, a7, a5
+; RV64IA-NEXT:    sra a7, a7, a5
 ; RV64IA-NEXT:    bge a7, a1, .LBB94_3
 ; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB94_1 Depth=1
-; RV64IA-NEXT:    xor a6, a5, a1
+; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
-; RV64IA-NEXT:    xor a6, a5, a6
+; RV64IA-NEXT:    xor a6, a3, a6
 ; RV64IA-NEXT:  .LBB94_3: # in Loop: Header=BB94_1 Depth=1
 ; RV64IA-NEXT:    sc.w.aqrl a6, a6, (a2)
 ; RV64IA-NEXT:    bnez a6, .LBB94_1
 ; RV64IA-NEXT:  # %bb.4:
-; RV64IA-NEXT:    srlw a0, a5, a0
+; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
   %1 = atomicrmw max i16* %a, i16 %b seq_cst
   ret i16 %1
@@ -7643,23 +7643,23 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
-; RV32IA-NEXT:    sub a3, a5, a3
+; RV32IA-NEXT:    sub a5, a5, a3
 ; RV32IA-NEXT:  .LBB95_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w a5, (a2)
-; RV32IA-NEXT:    and a7, a5, a4
-; RV32IA-NEXT:    mv a6, a5
-; RV32IA-NEXT:    sll a7, a7, a3
-; RV32IA-NEXT:    sra a7, a7, a3
+; RV32IA-NEXT:    lr.w a3, (a2)
+; RV32IA-NEXT:    and a7, a3, a4
+; RV32IA-NEXT:    mv a6, a3
+; RV32IA-NEXT:    sll a7, a7, a5
+; RV32IA-NEXT:    sra a7, a7, a5
 ; RV32IA-NEXT:    bge a1, a7, .LBB95_3
 ; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB95_1 Depth=1
-; RV32IA-NEXT:    xor a6, a5, a1
+; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
-; RV32IA-NEXT:    xor a6, a5, a6
+; RV32IA-NEXT:    xor a6, a3, a6
 ; RV32IA-NEXT:  .LBB95_3: # in Loop: Header=BB95_1 Depth=1
 ; RV32IA-NEXT:    sc.w a6, a6, (a2)
 ; RV32IA-NEXT:    bnez a6, .LBB95_1
 ; RV32IA-NEXT:  # %bb.4:
-; RV32IA-NEXT:    srl a0, a5, a0
+; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_min_i16_monotonic:
@@ -7716,23 +7716,23 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
-; RV64IA-NEXT:    sub a3, a5, a3
+; RV64IA-NEXT:    sub a5, a5, a3
 ; RV64IA-NEXT:  .LBB95_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w a5, (a2)
-; RV64IA-NEXT:    and a7, a5, a4
-; RV64IA-NEXT:    mv a6, a5
-; RV64IA-NEXT:    sll a7, a7, a3
-; RV64IA-NEXT:    sra a7, a7, a3
+; RV64IA-NEXT:    lr.w a3, (a2)
+; RV64IA-NEXT:    and a7, a3, a4
+; RV64IA-NEXT:    mv a6, a3
+; RV64IA-NEXT:    sll a7, a7, a5
+; RV64IA-NEXT:    sra a7, a7, a5
 ; RV64IA-NEXT:    bge a1, a7, .LBB95_3
 ; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB95_1 Depth=1
-; RV64IA-NEXT:    xor a6, a5, a1
+; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
-; RV64IA-NEXT:    xor a6, a5, a6
+; RV64IA-NEXT:    xor a6, a3, a6
 ; RV64IA-NEXT:  .LBB95_3: # in Loop: Header=BB95_1 Depth=1
 ; RV64IA-NEXT:    sc.w a6, a6, (a2)
 ; RV64IA-NEXT:    bnez a6, .LBB95_1
 ; RV64IA-NEXT:  # %bb.4:
-; RV64IA-NEXT:    srlw a0, a5, a0
+; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
   %1 = atomicrmw min i16* %a, i16 %b monotonic
   ret i16 %1
@@ -7793,23 +7793,23 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
-; RV32IA-NEXT:    sub a3, a5, a3
+; RV32IA-NEXT:    sub a5, a5, a3
 ; RV32IA-NEXT:  .LBB96_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w.aq a5, (a2)
-; RV32IA-NEXT:    and a7, a5, a4
-; RV32IA-NEXT:    mv a6, a5
-; RV32IA-NEXT:    sll a7, a7, a3
-; RV32IA-NEXT:    sra a7, a7, a3
+; RV32IA-NEXT:    lr.w.aq a3, (a2)
+; RV32IA-NEXT:    and a7, a3, a4
+; RV32IA-NEXT:    mv a6, a3
+; RV32IA-NEXT:    sll a7, a7, a5
+; RV32IA-NEXT:    sra a7, a7, a5
 ; RV32IA-NEXT:    bge a1, a7, .LBB96_3
 ; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB96_1 Depth=1
-; RV32IA-NEXT:    xor a6, a5, a1
+; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
-; RV32IA-NEXT:    xor a6, a5, a6
+; RV32IA-NEXT:    xor a6, a3, a6
 ; RV32IA-NEXT:  .LBB96_3: # in Loop: Header=BB96_1 Depth=1
 ; RV32IA-NEXT:    sc.w a6, a6, (a2)
 ; RV32IA-NEXT:    bnez a6, .LBB96_1
 ; RV32IA-NEXT:  # %bb.4:
-; RV32IA-NEXT:    srl a0, a5, a0
+; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_min_i16_acquire:
@@ -7866,23 +7866,23 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
-; RV64IA-NEXT:    sub a3, a5, a3
+; RV64IA-NEXT:    sub a5, a5, a3
 ; RV64IA-NEXT:  .LBB96_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w.aq a5, (a2)
-; RV64IA-NEXT:    and a7, a5, a4
-; RV64IA-NEXT:    mv a6, a5
-; RV64IA-NEXT:    sll a7, a7, a3
-; RV64IA-NEXT:    sra a7, a7, a3
+; RV64IA-NEXT:    lr.w.aq a3, (a2)
+; RV64IA-NEXT:    and a7, a3, a4
+; RV64IA-NEXT:    mv a6, a3
+; RV64IA-NEXT:    sll a7, a7, a5
+; RV64IA-NEXT:    sra a7, a7, a5
 ; RV64IA-NEXT:    bge a1, a7, .LBB96_3
 ; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB96_1 Depth=1
-; RV64IA-NEXT:    xor a6, a5, a1
+; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
-; RV64IA-NEXT:    xor a6, a5, a6
+; RV64IA-NEXT:    xor a6, a3, a6
 ; RV64IA-NEXT:  .LBB96_3: # in Loop: Header=BB96_1 Depth=1
 ; RV64IA-NEXT:    sc.w a6, a6, (a2)
 ; RV64IA-NEXT:    bnez a6, .LBB96_1
 ; RV64IA-NEXT:  # %bb.4:
-; RV64IA-NEXT:    srlw a0, a5, a0
+; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
   %1 = atomicrmw min i16* %a, i16 %b acquire
   ret i16 %1
@@ -7943,23 +7943,23 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
-; RV32IA-NEXT:    sub a3, a5, a3
+; RV32IA-NEXT:    sub a5, a5, a3
 ; RV32IA-NEXT:  .LBB97_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w a5, (a2)
-; RV32IA-NEXT:    and a7, a5, a4
-; RV32IA-NEXT:    mv a6, a5
-; RV32IA-NEXT:    sll a7, a7, a3
-; RV32IA-NEXT:    sra a7, a7, a3
+; RV32IA-NEXT:    lr.w a3, (a2)
+; RV32IA-NEXT:    and a7, a3, a4
+; RV32IA-NEXT:    mv a6, a3
+; RV32IA-NEXT:    sll a7, a7, a5
+; RV32IA-NEXT:    sra a7, a7, a5
 ; RV32IA-NEXT:    bge a1, a7, .LBB97_3
 ; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB97_1 Depth=1
-; RV32IA-NEXT:    xor a6, a5, a1
+; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
-; RV32IA-NEXT:    xor a6, a5, a6
+; RV32IA-NEXT:    xor a6, a3, a6
 ; RV32IA-NEXT:  .LBB97_3: # in Loop: Header=BB97_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a6, a6, (a2)
 ; RV32IA-NEXT:    bnez a6, .LBB97_1
 ; RV32IA-NEXT:  # %bb.4:
-; RV32IA-NEXT:    srl a0, a5, a0
+; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_min_i16_release:
@@ -8016,23 +8016,23 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
-; RV64IA-NEXT:    sub a3, a5, a3
+; RV64IA-NEXT:    sub a5, a5, a3
 ; RV64IA-NEXT:  .LBB97_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w a5, (a2)
-; RV64IA-NEXT:    and a7, a5, a4
-; RV64IA-NEXT:    mv a6, a5
-; RV64IA-NEXT:    sll a7, a7, a3
-; RV64IA-NEXT:    sra a7, a7, a3
+; RV64IA-NEXT:    lr.w a3, (a2)
+; RV64IA-NEXT:    and a7, a3, a4
+; RV64IA-NEXT:    mv a6, a3
+; RV64IA-NEXT:    sll a7, a7, a5
+; RV64IA-NEXT:    sra a7, a7, a5
 ; RV64IA-NEXT:    bge a1, a7, .LBB97_3
 ; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB97_1 Depth=1
-; RV64IA-NEXT:    xor a6, a5, a1
+; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
-; RV64IA-NEXT:    xor a6, a5, a6
+; RV64IA-NEXT:    xor a6, a3, a6
 ; RV64IA-NEXT:  .LBB97_3: # in Loop: Header=BB97_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a6, a6, (a2)
 ; RV64IA-NEXT:    bnez a6, .LBB97_1
 ; RV64IA-NEXT:  # %bb.4:
-; RV64IA-NEXT:    srlw a0, a5, a0
+; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
   %1 = atomicrmw min i16* %a, i16 %b release
   ret i16 %1
@@ -8093,23 +8093,23 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
-; RV32IA-NEXT:    sub a3, a5, a3
+; RV32IA-NEXT:    sub a5, a5, a3
 ; RV32IA-NEXT:  .LBB98_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w.aq a5, (a2)
-; RV32IA-NEXT:    and a7, a5, a4
-; RV32IA-NEXT:    mv a6, a5
-; RV32IA-NEXT:    sll a7, a7, a3
-; RV32IA-NEXT:    sra a7, a7, a3
+; RV32IA-NEXT:    lr.w.aq a3, (a2)
+; RV32IA-NEXT:    and a7, a3, a4
+; RV32IA-NEXT:    mv a6, a3
+; RV32IA-NEXT:    sll a7, a7, a5
+; RV32IA-NEXT:    sra a7, a7, a5
 ; RV32IA-NEXT:    bge a1, a7, .LBB98_3
 ; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB98_1 Depth=1
-; RV32IA-NEXT:    xor a6, a5, a1
+; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
-; RV32IA-NEXT:    xor a6, a5, a6
+; RV32IA-NEXT:    xor a6, a3, a6
 ; RV32IA-NEXT:  .LBB98_3: # in Loop: Header=BB98_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a6, a6, (a2)
 ; RV32IA-NEXT:    bnez a6, .LBB98_1
 ; RV32IA-NEXT:  # %bb.4:
-; RV32IA-NEXT:    srl a0, a5, a0
+; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_min_i16_acq_rel:
@@ -8166,23 +8166,23 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
-; RV64IA-NEXT:    sub a3, a5, a3
+; RV64IA-NEXT:    sub a5, a5, a3
 ; RV64IA-NEXT:  .LBB98_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w.aq a5, (a2)
-; RV64IA-NEXT:    and a7, a5, a4
-; RV64IA-NEXT:    mv a6, a5
-; RV64IA-NEXT:    sll a7, a7, a3
-; RV64IA-NEXT:    sra a7, a7, a3
+; RV64IA-NEXT:    lr.w.aq a3, (a2)
+; RV64IA-NEXT:    and a7, a3, a4
+; RV64IA-NEXT:    mv a6, a3
+; RV64IA-NEXT:    sll a7, a7, a5
+; RV64IA-NEXT:    sra a7, a7, a5
 ; RV64IA-NEXT:    bge a1, a7, .LBB98_3
 ; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB98_1 Depth=1
-; RV64IA-NEXT:    xor a6, a5, a1
+; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
-; RV64IA-NEXT:    xor a6, a5, a6
+; RV64IA-NEXT:    xor a6, a3, a6
 ; RV64IA-NEXT:  .LBB98_3: # in Loop: Header=BB98_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a6, a6, (a2)
 ; RV64IA-NEXT:    bnez a6, .LBB98_1
 ; RV64IA-NEXT:  # %bb.4:
-; RV64IA-NEXT:    srlw a0, a5, a0
+; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
   %1 = atomicrmw min i16* %a, i16 %b acq_rel
   ret i16 %1
@@ -8243,23 +8243,23 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
-; RV32IA-NEXT:    sub a3, a5, a3
+; RV32IA-NEXT:    sub a5, a5, a3
 ; RV32IA-NEXT:  .LBB99_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w.aqrl a5, (a2)
-; RV32IA-NEXT:    and a7, a5, a4
-; RV32IA-NEXT:    mv a6, a5
-; RV32IA-NEXT:    sll a7, a7, a3
-; RV32IA-NEXT:    sra a7, a7, a3
+; RV32IA-NEXT:    lr.w.aqrl a3, (a2)
+; RV32IA-NEXT:    and a7, a3, a4
+; RV32IA-NEXT:    mv a6, a3
+; RV32IA-NEXT:    sll a7, a7, a5
+; RV32IA-NEXT:    sra a7, a7, a5
 ; RV32IA-NEXT:    bge a1, a7, .LBB99_3
 ; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB99_1 Depth=1
-; RV32IA-NEXT:    xor a6, a5, a1
+; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
-; RV32IA-NEXT:    xor a6, a5, a6
+; RV32IA-NEXT:    xor a6, a3, a6
 ; RV32IA-NEXT:  .LBB99_3: # in Loop: Header=BB99_1 Depth=1
 ; RV32IA-NEXT:    sc.w.aqrl a6, a6, (a2)
 ; RV32IA-NEXT:    bnez a6, .LBB99_1
 ; RV32IA-NEXT:  # %bb.4:
-; RV32IA-NEXT:    srl a0, a5, a0
+; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_min_i16_seq_cst:
@@ -8316,23 +8316,23 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
-; RV64IA-NEXT:    sub a3, a5, a3
+; RV64IA-NEXT:    sub a5, a5, a3
 ; RV64IA-NEXT:  .LBB99_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w.aqrl a5, (a2)
-; RV64IA-NEXT:    and a7, a5, a4
-; RV64IA-NEXT:    mv a6, a5
-; RV64IA-NEXT:    sll a7, a7, a3
-; RV64IA-NEXT:    sra a7, a7, a3
+; RV64IA-NEXT:    lr.w.aqrl a3, (a2)
+; RV64IA-NEXT:    and a7, a3, a4
+; RV64IA-NEXT:    mv a6, a3
+; RV64IA-NEXT:    sll a7, a7, a5
+; RV64IA-NEXT:    sra a7, a7, a5
 ; RV64IA-NEXT:    bge a1, a7, .LBB99_3
 ; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB99_1 Depth=1
-; RV64IA-NEXT:    xor a6, a5, a1
+; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
-; RV64IA-NEXT:    xor a6, a5, a6
+; RV64IA-NEXT:    xor a6, a3, a6
 ; RV64IA-NEXT:  .LBB99_3: # in Loop: Header=BB99_1 Depth=1
 ; RV64IA-NEXT:    sc.w.aqrl a6, a6, (a2)
 ; RV64IA-NEXT:    bnez a6, .LBB99_1
 ; RV64IA-NEXT:  # %bb.4:
-; RV64IA-NEXT:    srlw a0, a5, a0
+; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
   %1 = atomicrmw min i16* %a, i16 %b seq_cst
   ret i16 %1

diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll
index b4da0d62b3d7..2f592121a244 100644
--- a/llvm/test/CodeGen/RISCV/atomic-signext.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll
@@ -1678,23 +1678,23 @@ define signext i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
-; RV32IA-NEXT:    sub a3, a5, a3
+; RV32IA-NEXT:    sub a5, a5, a3
 ; RV32IA-NEXT:  .LBB21_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w a5, (a2)
-; RV32IA-NEXT:    and a7, a5, a4
-; RV32IA-NEXT:    mv a6, a5
-; RV32IA-NEXT:    sll a7, a7, a3
-; RV32IA-NEXT:    sra a7, a7, a3
+; RV32IA-NEXT:    lr.w a3, (a2)
+; RV32IA-NEXT:    and a7, a3, a4
+; RV32IA-NEXT:    mv a6, a3
+; RV32IA-NEXT:    sll a7, a7, a5
+; RV32IA-NEXT:    sra a7, a7, a5
 ; RV32IA-NEXT:    bge a7, a1, .LBB21_3
 ; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB21_1 Depth=1
-; RV32IA-NEXT:    xor a6, a5, a1
+; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
-; RV32IA-NEXT:    xor a6, a5, a6
+; RV32IA-NEXT:    xor a6, a3, a6
 ; RV32IA-NEXT:  .LBB21_3: # in Loop: Header=BB21_1 Depth=1
 ; RV32IA-NEXT:    sc.w a6, a6, (a2)
 ; RV32IA-NEXT:    bnez a6, .LBB21_1
 ; RV32IA-NEXT:  # %bb.4:
-; RV32IA-NEXT:    srl a0, a5, a0
+; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    slli a0, a0, 16
 ; RV32IA-NEXT:    srai a0, a0, 16
 ; RV32IA-NEXT:    ret
@@ -1754,23 +1754,23 @@ define signext i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
-; RV64IA-NEXT:    sub a3, a5, a3
+; RV64IA-NEXT:    sub a5, a5, a3
 ; RV64IA-NEXT:  .LBB21_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w a5, (a2)
-; RV64IA-NEXT:    and a7, a5, a4
-; RV64IA-NEXT:    mv a6, a5
-; RV64IA-NEXT:    sll a7, a7, a3
-; RV64IA-NEXT:    sra a7, a7, a3
+; RV64IA-NEXT:    lr.w a3, (a2)
+; RV64IA-NEXT:    and a7, a3, a4
+; RV64IA-NEXT:    mv a6, a3
+; RV64IA-NEXT:    sll a7, a7, a5
+; RV64IA-NEXT:    sra a7, a7, a5
 ; RV64IA-NEXT:    bge a7, a1, .LBB21_3
 ; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB21_1 Depth=1
-; RV64IA-NEXT:    xor a6, a5, a1
+; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
-; RV64IA-NEXT:    xor a6, a5, a6
+; RV64IA-NEXT:    xor a6, a3, a6
 ; RV64IA-NEXT:  .LBB21_3: # in Loop: Header=BB21_1 Depth=1
 ; RV64IA-NEXT:    sc.w a6, a6, (a2)
 ; RV64IA-NEXT:    bnez a6, .LBB21_1
 ; RV64IA-NEXT:  # %bb.4:
-; RV64IA-NEXT:    srlw a0, a5, a0
+; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    slli a0, a0, 48
 ; RV64IA-NEXT:    srai a0, a0, 48
 ; RV64IA-NEXT:    ret
@@ -1834,23 +1834,23 @@ define signext i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
-; RV32IA-NEXT:    sub a3, a5, a3
+; RV32IA-NEXT:    sub a5, a5, a3
 ; RV32IA-NEXT:  .LBB22_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w a5, (a2)
-; RV32IA-NEXT:    and a7, a5, a4
-; RV32IA-NEXT:    mv a6, a5
-; RV32IA-NEXT:    sll a7, a7, a3
-; RV32IA-NEXT:    sra a7, a7, a3
+; RV32IA-NEXT:    lr.w a3, (a2)
+; RV32IA-NEXT:    and a7, a3, a4
+; RV32IA-NEXT:    mv a6, a3
+; RV32IA-NEXT:    sll a7, a7, a5
+; RV32IA-NEXT:    sra a7, a7, a5
 ; RV32IA-NEXT:    bge a1, a7, .LBB22_3
 ; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB22_1 Depth=1
-; RV32IA-NEXT:    xor a6, a5, a1
+; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
-; RV32IA-NEXT:    xor a6, a5, a6
+; RV32IA-NEXT:    xor a6, a3, a6
 ; RV32IA-NEXT:  .LBB22_3: # in Loop: Header=BB22_1 Depth=1
 ; RV32IA-NEXT:    sc.w a6, a6, (a2)
 ; RV32IA-NEXT:    bnez a6, .LBB22_1
 ; RV32IA-NEXT:  # %bb.4:
-; RV32IA-NEXT:    srl a0, a5, a0
+; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    slli a0, a0, 16
 ; RV32IA-NEXT:    srai a0, a0, 16
 ; RV32IA-NEXT:    ret
@@ -1910,23 +1910,23 @@ define signext i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
-; RV64IA-NEXT:    sub a3, a5, a3
+; RV64IA-NEXT:    sub a5, a5, a3
 ; RV64IA-NEXT:  .LBB22_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w a5, (a2)
-; RV64IA-NEXT:    and a7, a5, a4
-; RV64IA-NEXT:    mv a6, a5
-; RV64IA-NEXT:    sll a7, a7, a3
-; RV64IA-NEXT:    sra a7, a7, a3
+; RV64IA-NEXT:    lr.w a3, (a2)
+; RV64IA-NEXT:    and a7, a3, a4
+; RV64IA-NEXT:    mv a6, a3
+; RV64IA-NEXT:    sll a7, a7, a5
+; RV64IA-NEXT:    sra a7, a7, a5
 ; RV64IA-NEXT:    bge a1, a7, .LBB22_3
 ; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB22_1 Depth=1
-; RV64IA-NEXT:    xor a6, a5, a1
+; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
-; RV64IA-NEXT:    xor a6, a5, a6
+; RV64IA-NEXT:    xor a6, a3, a6
 ; RV64IA-NEXT:  .LBB22_3: # in Loop: Header=BB22_1 Depth=1
 ; RV64IA-NEXT:    sc.w a6, a6, (a2)
 ; RV64IA-NEXT:    bnez a6, .LBB22_1
 ; RV64IA-NEXT:  # %bb.4:
-; RV64IA-NEXT:    srlw a0, a5, a0
+; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    slli a0, a0, 48
 ; RV64IA-NEXT:    srai a0, a0, 48
 ; RV64IA-NEXT:    ret
@@ -3864,9 +3864,9 @@ define i1 @cmpxchg_i8_monotonic_monotonic_val1(i8* %ptr, i8 signext %cmp, i8 sig
 ; RV32IA-NEXT:    sc.w a5, a5, (a3)
 ; RV32IA-NEXT:    bnez a5, .LBB48_1
 ; RV32IA-NEXT:  .LBB48_3:
-; RV32IA-NEXT:    and a0, a2, a4
-; RV32IA-NEXT:    xor a0, a1, a0
-; RV32IA-NEXT:    seqz a0, a0
+; RV32IA-NEXT:    and a2, a2, a4
+; RV32IA-NEXT:    xor a1, a1, a2
+; RV32IA-NEXT:    seqz a0, a1
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: cmpxchg_i8_monotonic_monotonic_val1:
@@ -3903,9 +3903,9 @@ define i1 @cmpxchg_i8_monotonic_monotonic_val1(i8* %ptr, i8 signext %cmp, i8 sig
 ; RV64IA-NEXT:    sc.w a5, a5, (a3)
 ; RV64IA-NEXT:    bnez a5, .LBB48_1
 ; RV64IA-NEXT:  .LBB48_3:
-; RV64IA-NEXT:    and a0, a2, a4
-; RV64IA-NEXT:    xor a0, a1, a0
-; RV64IA-NEXT:    seqz a0, a0
+; RV64IA-NEXT:    and a2, a2, a4
+; RV64IA-NEXT:    xor a1, a1, a2
+; RV64IA-NEXT:    seqz a0, a1
 ; RV64IA-NEXT:    ret
   %1 = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic
   %2 = extractvalue { i8, i1 } %1, 1
@@ -4035,9 +4035,9 @@ define i1 @cmpxchg_i16_monotonic_monotonic_val1(i16* %ptr, i16 signext %cmp, i16
 ; RV32IA-NEXT:    sc.w a4, a4, (a3)
 ; RV32IA-NEXT:    bnez a4, .LBB50_1
 ; RV32IA-NEXT:  .LBB50_3:
-; RV32IA-NEXT:    and a0, a2, a5
-; RV32IA-NEXT:    xor a0, a1, a0
-; RV32IA-NEXT:    seqz a0, a0
+; RV32IA-NEXT:    and a2, a2, a5
+; RV32IA-NEXT:    xor a1, a1, a2
+; RV32IA-NEXT:    seqz a0, a1
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: cmpxchg_i16_monotonic_monotonic_val1:
@@ -4075,9 +4075,9 @@ define i1 @cmpxchg_i16_monotonic_monotonic_val1(i16* %ptr, i16 signext %cmp, i16
 ; RV64IA-NEXT:    sc.w a4, a4, (a3)
 ; RV64IA-NEXT:    bnez a4, .LBB50_1
 ; RV64IA-NEXT:  .LBB50_3:
-; RV64IA-NEXT:    and a0, a2, a5
-; RV64IA-NEXT:    xor a0, a1, a0
-; RV64IA-NEXT:    seqz a0, a0
+; RV64IA-NEXT:    and a2, a2, a5
+; RV64IA-NEXT:    xor a1, a1, a2
+; RV64IA-NEXT:    seqz a0, a1
 ; RV64IA-NEXT:    ret
   %1 = cmpxchg i16* %ptr, i16 %cmp, i16 %val monotonic monotonic
   %2 = extractvalue { i16, i1 } %1, 1
@@ -4164,8 +4164,8 @@ define i1 @cmpxchg_i32_monotonic_monotonic_val1(i32* %ptr, i32 signext %cmp, i32
 ; RV32IA-NEXT:    sc.w a4, a2, (a0)
 ; RV32IA-NEXT:    bnez a4, .LBB52_1
 ; RV32IA-NEXT:  .LBB52_3:
-; RV32IA-NEXT:    xor a0, a3, a1
-; RV32IA-NEXT:    seqz a0, a0
+; RV32IA-NEXT:    xor a1, a3, a1
+; RV32IA-NEXT:    seqz a0, a1
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: cmpxchg_i32_monotonic_monotonic_val1:
@@ -4190,8 +4190,8 @@ define i1 @cmpxchg_i32_monotonic_monotonic_val1(i32* %ptr, i32 signext %cmp, i32
 ; RV64IA-NEXT:    sc.w a4, a2, (a0)
 ; RV64IA-NEXT:    bnez a4, .LBB52_1
 ; RV64IA-NEXT:  .LBB52_3:
-; RV64IA-NEXT:    xor a0, a3, a1
-; RV64IA-NEXT:    seqz a0, a0
+; RV64IA-NEXT:    xor a1, a3, a1
+; RV64IA-NEXT:    seqz a0, a1
 ; RV64IA-NEXT:    ret
   %1 = cmpxchg i32* %ptr, i32 %cmp, i32 %val monotonic monotonic
   %2 = extractvalue { i32, i1 } %1, 1

diff --git a/llvm/test/CodeGen/RISCV/branch-relaxation.ll b/llvm/test/CodeGen/RISCV/branch-relaxation.ll
index ba9b2d39c506..431179eff4cc 100644
--- a/llvm/test/CodeGen/RISCV/branch-relaxation.ll
+++ b/llvm/test/CodeGen/RISCV/branch-relaxation.ll
@@ -2941,11 +2941,9 @@ define void @relax_jal_spill_32_restore_block_correspondence() {
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    li ra, 1
 ; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    sd ra, 16(sp) # 8-byte Folded Spill
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    li t0, 5
 ; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    sd t0, 8(sp) # 8-byte Folded Spill
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    li t1, 6
 ; CHECK-RV64-NEXT:    #NO_APP
@@ -3024,23 +3022,25 @@ define void @relax_jal_spill_32_restore_block_correspondence() {
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    li t6, 31
 ; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    sext.w t0, t6
-; CHECK-RV64-NEXT:    sext.w ra, t5
-; CHECK-RV64-NEXT:    bne ra, t0, .LBB6_1
+; CHECK-RV64-NEXT:    sd t6, 8(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sext.w t6, t6
+; CHECK-RV64-NEXT:    sd t5, 16(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sext.w t5, t5
+; CHECK-RV64-NEXT:    bne t5, t6, .LBB6_1
 ; CHECK-RV64-NEXT:  # %bb.7: # %entry
-; CHECK-RV64-NEXT:    jump .LBB6_4, t0
+; CHECK-RV64-NEXT:    jump .LBB6_4, t5
 ; CHECK-RV64-NEXT:  .LBB6_1: # %cond_2
-; CHECK-RV64-NEXT:    sext.w t0, t4
-; CHECK-RV64-NEXT:    sext.w ra, t3
-; CHECK-RV64-NEXT:    bne ra, t0, .LBB6_2
+; CHECK-RV64-NEXT:    sext.w t5, t4
+; CHECK-RV64-NEXT:    sext.w t6, t3
+; CHECK-RV64-NEXT:    bne t6, t5, .LBB6_2
 ; CHECK-RV64-NEXT:  # %bb.9: # %cond_2
-; CHECK-RV64-NEXT:    jump .LBB6_5, t0
+; CHECK-RV64-NEXT:    jump .LBB6_5, t5
 ; CHECK-RV64-NEXT:  .LBB6_2: # %cond_3
-; CHECK-RV64-NEXT:    sext.w t0, t2
-; CHECK-RV64-NEXT:    sext.w ra, t1
-; CHECK-RV64-NEXT:    bne ra, t0, .LBB6_3
+; CHECK-RV64-NEXT:    sext.w t5, t2
+; CHECK-RV64-NEXT:    sext.w t6, t1
+; CHECK-RV64-NEXT:    bne t6, t5, .LBB6_3
 ; CHECK-RV64-NEXT:  # %bb.11: # %cond_3
-; CHECK-RV64-NEXT:    jump .LBB6_6, t0
+; CHECK-RV64-NEXT:    jump .LBB6_6, t5
 ; CHECK-RV64-NEXT:  .LBB6_3: # %space
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    .zero 1048576
@@ -3057,11 +3057,9 @@ define void @relax_jal_spill_32_restore_block_correspondence() {
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    # dest 3
 ; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    ld ra, 16(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    # reg use ra
 ; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    ld t0, 8(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    # reg use t0
 ; CHECK-RV64-NEXT:    #NO_APP
@@ -3137,9 +3135,11 @@ define void @relax_jal_spill_32_restore_block_correspondence() {
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    # reg use t4
 ; CHECK-RV64-NEXT:    #NO_APP
+; CHECK-RV64-NEXT:    ld t5, 16(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    # reg use t5
 ; CHECK-RV64-NEXT:    #NO_APP
+; CHECK-RV64-NEXT:    ld t6, 8(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    # reg use t6
 ; CHECK-RV64-NEXT:    #NO_APP

diff --git a/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll b/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll
index 3db0ed8c9589..616b9fce88c9 100644
--- a/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll
+++ b/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll
@@ -139,11 +139,11 @@ define i64 @test_bswap_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    slli a5, a5, 24
 ; RV64I-NEXT:    or a3, a5, a3
 ; RV64I-NEXT:    or a1, a3, a1
-; RV64I-NEXT:    and a3, a0, a4
-; RV64I-NEXT:    slli a3, a3, 24
-; RV64I-NEXT:    srliw a4, a0, 24
-; RV64I-NEXT:    slli a4, a4, 32
-; RV64I-NEXT:    or a3, a3, a4
+; RV64I-NEXT:    and a4, a0, a4
+; RV64I-NEXT:    slli a4, a4, 24
+; RV64I-NEXT:    srliw a3, a0, 24
+; RV64I-NEXT:    slli a3, a3, 32
+; RV64I-NEXT:    or a3, a4, a3
 ; RV64I-NEXT:    and a2, a0, a2
 ; RV64I-NEXT:    slli a2, a2, 40
 ; RV64I-NEXT:    slli a0, a0, 56
@@ -611,11 +611,11 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    slli a5, a5, 24
 ; RV64I-NEXT:    or a3, a5, a3
 ; RV64I-NEXT:    or a1, a3, a1
-; RV64I-NEXT:    and a3, a0, a4
-; RV64I-NEXT:    slli a3, a3, 24
-; RV64I-NEXT:    srliw a4, a0, 24
-; RV64I-NEXT:    slli a4, a4, 32
-; RV64I-NEXT:    or a3, a3, a4
+; RV64I-NEXT:    and a4, a0, a4
+; RV64I-NEXT:    slli a4, a4, 24
+; RV64I-NEXT:    srliw a3, a0, 24
+; RV64I-NEXT:    slli a3, a3, 32
+; RV64I-NEXT:    or a3, a4, a3
 ; RV64I-NEXT:    and a2, a0, a2
 ; RV64I-NEXT:    slli a2, a2, 40
 ; RV64I-NEXT:    slli a0, a0, 56

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
index 0b73661af5c0..979690a46751 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
@@ -202,8 +202,8 @@ define i32 @callee_large_scalars(i128 %a, fp128 %b) nounwind {
 ; RV32I-FPELIM-NEXT:    xor a4, a7, a4
 ; RV32I-FPELIM-NEXT:    or a4, a4, a5
 ; RV32I-FPELIM-NEXT:    xor a0, a0, a1
-; RV32I-FPELIM-NEXT:    xor a1, a3, a2
-; RV32I-FPELIM-NEXT:    or a0, a1, a0
+; RV32I-FPELIM-NEXT:    xor a2, a3, a2
+; RV32I-FPELIM-NEXT:    or a0, a2, a0
 ; RV32I-FPELIM-NEXT:    or a0, a0, a4
 ; RV32I-FPELIM-NEXT:    seqz a0, a0
 ; RV32I-FPELIM-NEXT:    ret
@@ -226,8 +226,8 @@ define i32 @callee_large_scalars(i128 %a, fp128 %b) nounwind {
 ; RV32I-WITHFP-NEXT:    xor a4, a7, a4
 ; RV32I-WITHFP-NEXT:    or a4, a4, a5
 ; RV32I-WITHFP-NEXT:    xor a0, a0, a1
-; RV32I-WITHFP-NEXT:    xor a1, a3, a2
-; RV32I-WITHFP-NEXT:    or a0, a1, a0
+; RV32I-WITHFP-NEXT:    xor a2, a3, a2
+; RV32I-WITHFP-NEXT:    or a0, a2, a0
 ; RV32I-WITHFP-NEXT:    or a0, a0, a4
 ; RV32I-WITHFP-NEXT:    seqz a0, a0
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
index 8d38a2e749af..a8b2b1ca3de5 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
@@ -118,8 +118,8 @@ define i64 @callee_large_scalars(i256 %a, i256 %b) nounwind {
 ; RV64I-NEXT:    xor a4, a7, a4
 ; RV64I-NEXT:    or a4, a4, a5
 ; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    xor a1, a3, a2
-; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    xor a2, a3, a2
+; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    or a0, a0, a4
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
index a3538476694a..0f4496818452 100644
--- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
@@ -2155,13 +2155,13 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a2, 349525
 ; RV32I-NEXT:    addi s2, a2, 1365
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    sub a0, a1, a0
-; RV32I-NEXT:    lui a1, 209715
-; RV32I-NEXT:    addi s3, a1, 819
-; RV32I-NEXT:    and a1, a0, s3
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    sub a1, a1, a0
+; RV32I-NEXT:    lui a0, 209715
+; RV32I-NEXT:    addi s3, a0, 819
+; RV32I-NEXT:    and a0, a1, s3
+; RV32I-NEXT:    srli a1, a1, 2
+; RV32I-NEXT:    and a1, a1, s3
+; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    srli a1, a0, 4
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    lui a1, 61681
@@ -2174,11 +2174,11 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    srli s5, a0, 24
 ; RV32I-NEXT:    srli a0, s0, 1
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    sub a0, s0, a0
-; RV32I-NEXT:    and a1, a0, s3
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    sub s0, s0, a0
+; RV32I-NEXT:    and a0, s0, s3
+; RV32I-NEXT:    srli s0, s0, 2
+; RV32I-NEXT:    and a1, s0, s3
+; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    srli a1, a0, 4
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    and a0, a0, s4

diff --git a/llvm/test/CodeGen/RISCV/div-by-constant.ll b/llvm/test/CodeGen/RISCV/div-by-constant.ll
index 9198cf478f3a..d20a9b929697 100644
--- a/llvm/test/CodeGen/RISCV/div-by-constant.ll
+++ b/llvm/test/CodeGen/RISCV/div-by-constant.ll
@@ -82,8 +82,8 @@ define i64 @udiv64_constant_no_add(i64 %a) nounwind {
 ; RV32-NEXT:    mulhu a6, a5, a4
 ; RV32-NEXT:    add a3, a6, a3
 ; RV32-NEXT:    sltu a0, a0, a2
-; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    mul a1, a0, a4
+; RV32-NEXT:    sub a1, a1, a0
+; RV32-NEXT:    mul a1, a1, a4
 ; RV32-NEXT:    add a1, a3, a1
 ; RV32-NEXT:    mul a0, a5, a4
 ; RV32-NEXT:    ret
@@ -339,10 +339,10 @@ define i32 @sdiv_constant_sub_srai(i32 %a) nounwind {
 ; RV32-NEXT:    lui a1, 449390
 ; RV32-NEXT:    addi a1, a1, -1171
 ; RV32-NEXT:    mulh a1, a0, a1
-; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    srli a1, a0, 31
-; RV32-NEXT:    srai a0, a0, 2
-; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    sub a1, a1, a0
+; RV32-NEXT:    srli a0, a1, 31
+; RV32-NEXT:    srai a1, a1, 2
+; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sdiv_constant_sub_srai:
@@ -352,10 +352,10 @@ define i32 @sdiv_constant_sub_srai(i32 %a) nounwind {
 ; RV64-NEXT:    addiw a2, a2, -1171
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    srli a1, a1, 32
-; RV64-NEXT:    subw a0, a1, a0
-; RV64-NEXT:    srliw a1, a0, 31
-; RV64-NEXT:    sraiw a0, a0, 2
-; RV64-NEXT:    add a0, a0, a1
+; RV64-NEXT:    subw a1, a1, a0
+; RV64-NEXT:    srliw a0, a1, 31
+; RV64-NEXT:    sraiw a1, a1, 2
+; RV64-NEXT:    add a0, a1, a0
 ; RV64-NEXT:    ret
   %1 = sdiv i32 %a, -7
   ret i32 %1
@@ -453,10 +453,10 @@ define i64 @sdiv64_constant_sub_srai(i64 %a) nounwind {
 ; RV64-NEXT:    lui a1, %hi(.LCPI15_0)
 ; RV64-NEXT:    ld a1, %lo(.LCPI15_0)(a1)
 ; RV64-NEXT:    mulh a1, a0, a1
-; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    srli a1, a0, 63
-; RV64-NEXT:    srai a0, a0, 1
-; RV64-NEXT:    add a0, a0, a1
+; RV64-NEXT:    sub a1, a1, a0
+; RV64-NEXT:    srli a0, a1, 63
+; RV64-NEXT:    srai a1, a1, 1
+; RV64-NEXT:    add a0, a1, a0
 ; RV64-NEXT:    ret
   %1 = sdiv i64 %a, -3
   ret i64 %1
@@ -628,11 +628,11 @@ define i8 @sdiv8_constant_sub_srai(i8 %a) nounwind {
 ; RV32IM-NEXT:    li a2, 109
 ; RV32IM-NEXT:    mul a1, a1, a2
 ; RV32IM-NEXT:    srli a1, a1, 8
-; RV32IM-NEXT:    sub a0, a1, a0
-; RV32IM-NEXT:    slli a0, a0, 24
-; RV32IM-NEXT:    srli a1, a0, 31
-; RV32IM-NEXT:    srai a0, a0, 26
-; RV32IM-NEXT:    add a0, a0, a1
+; RV32IM-NEXT:    sub a1, a1, a0
+; RV32IM-NEXT:    slli a1, a1, 24
+; RV32IM-NEXT:    srli a0, a1, 31
+; RV32IM-NEXT:    srai a1, a1, 26
+; RV32IM-NEXT:    add a0, a1, a0
 ; RV32IM-NEXT:    ret
 ;
 ; RV32IMZB-LABEL: sdiv8_constant_sub_srai:
@@ -641,11 +641,11 @@ define i8 @sdiv8_constant_sub_srai(i8 %a) nounwind {
 ; RV32IMZB-NEXT:    li a2, 109
 ; RV32IMZB-NEXT:    mul a1, a1, a2
 ; RV32IMZB-NEXT:    srli a1, a1, 8
-; RV32IMZB-NEXT:    sub a0, a1, a0
-; RV32IMZB-NEXT:    slli a0, a0, 24
-; RV32IMZB-NEXT:    srli a1, a0, 31
-; RV32IMZB-NEXT:    srai a0, a0, 26
-; RV32IMZB-NEXT:    add a0, a0, a1
+; RV32IMZB-NEXT:    sub a1, a1, a0
+; RV32IMZB-NEXT:    slli a1, a1, 24
+; RV32IMZB-NEXT:    srli a0, a1, 31
+; RV32IMZB-NEXT:    srai a1, a1, 26
+; RV32IMZB-NEXT:    add a0, a1, a0
 ; RV32IMZB-NEXT:    ret
 ;
 ; RV64IM-LABEL: sdiv8_constant_sub_srai:
@@ -655,11 +655,11 @@ define i8 @sdiv8_constant_sub_srai(i8 %a) nounwind {
 ; RV64IM-NEXT:    li a2, 109
 ; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    srli a1, a1, 8
-; RV64IM-NEXT:    subw a0, a1, a0
-; RV64IM-NEXT:    slli a0, a0, 56
-; RV64IM-NEXT:    srli a1, a0, 63
-; RV64IM-NEXT:    srai a0, a0, 58
-; RV64IM-NEXT:    add a0, a0, a1
+; RV64IM-NEXT:    subw a1, a1, a0
+; RV64IM-NEXT:    slli a1, a1, 56
+; RV64IM-NEXT:    srli a0, a1, 63
+; RV64IM-NEXT:    srai a1, a1, 58
+; RV64IM-NEXT:    add a0, a1, a0
 ; RV64IM-NEXT:    ret
 ;
 ; RV64IMZB-LABEL: sdiv8_constant_sub_srai:
@@ -668,11 +668,11 @@ define i8 @sdiv8_constant_sub_srai(i8 %a) nounwind {
 ; RV64IMZB-NEXT:    li a2, 109
 ; RV64IMZB-NEXT:    mul a1, a1, a2
 ; RV64IMZB-NEXT:    srli a1, a1, 8
-; RV64IMZB-NEXT:    subw a0, a1, a0
-; RV64IMZB-NEXT:    slli a0, a0, 56
-; RV64IMZB-NEXT:    srli a1, a0, 63
-; RV64IMZB-NEXT:    srai a0, a0, 58
-; RV64IMZB-NEXT:    add a0, a0, a1
+; RV64IMZB-NEXT:    subw a1, a1, a0
+; RV64IMZB-NEXT:    slli a1, a1, 56
+; RV64IMZB-NEXT:    srli a0, a1, 63
+; RV64IMZB-NEXT:    srai a1, a1, 58
+; RV64IMZB-NEXT:    add a0, a1, a0
 ; RV64IMZB-NEXT:    ret
   %1 = sdiv i8 %a, -7
   ret i8 %1
@@ -849,11 +849,11 @@ define i16 @sdiv16_constant_sub_srai(i16 %a) nounwind {
 ; RV32IM-NEXT:    addi a2, a2, 1911
 ; RV32IM-NEXT:    mul a1, a1, a2
 ; RV32IM-NEXT:    srli a1, a1, 16
-; RV32IM-NEXT:    sub a0, a1, a0
-; RV32IM-NEXT:    slli a0, a0, 16
-; RV32IM-NEXT:    srli a1, a0, 31
-; RV32IM-NEXT:    srai a0, a0, 19
-; RV32IM-NEXT:    add a0, a0, a1
+; RV32IM-NEXT:    sub a1, a1, a0
+; RV32IM-NEXT:    slli a1, a1, 16
+; RV32IM-NEXT:    srli a0, a1, 31
+; RV32IM-NEXT:    srai a1, a1, 19
+; RV32IM-NEXT:    add a0, a1, a0
 ; RV32IM-NEXT:    ret
 ;
 ; RV32IMZB-LABEL: sdiv16_constant_sub_srai:
@@ -863,11 +863,11 @@ define i16 @sdiv16_constant_sub_srai(i16 %a) nounwind {
 ; RV32IMZB-NEXT:    addi a2, a2, 1911
 ; RV32IMZB-NEXT:    mul a1, a1, a2
 ; RV32IMZB-NEXT:    srli a1, a1, 16
-; RV32IMZB-NEXT:    sub a0, a1, a0
-; RV32IMZB-NEXT:    slli a0, a0, 16
-; RV32IMZB-NEXT:    srli a1, a0, 31
-; RV32IMZB-NEXT:    srai a0, a0, 19
-; RV32IMZB-NEXT:    add a0, a0, a1
+; RV32IMZB-NEXT:    sub a1, a1, a0
+; RV32IMZB-NEXT:    slli a1, a1, 16
+; RV32IMZB-NEXT:    srli a0, a1, 31
+; RV32IMZB-NEXT:    srai a1, a1, 19
+; RV32IMZB-NEXT:    add a0, a1, a0
 ; RV32IMZB-NEXT:    ret
 ;
 ; RV64IM-LABEL: sdiv16_constant_sub_srai:
@@ -878,11 +878,11 @@ define i16 @sdiv16_constant_sub_srai(i16 %a) nounwind {
 ; RV64IM-NEXT:    addiw a2, a2, 1911
 ; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    srli a1, a1, 16
-; RV64IM-NEXT:    subw a0, a1, a0
-; RV64IM-NEXT:    slli a0, a0, 48
-; RV64IM-NEXT:    srli a1, a0, 63
-; RV64IM-NEXT:    srai a0, a0, 51
-; RV64IM-NEXT:    add a0, a0, a1
+; RV64IM-NEXT:    subw a1, a1, a0
+; RV64IM-NEXT:    slli a1, a1, 48
+; RV64IM-NEXT:    srli a0, a1, 63
+; RV64IM-NEXT:    srai a1, a1, 51
+; RV64IM-NEXT:    add a0, a1, a0
 ; RV64IM-NEXT:    ret
 ;
 ; RV64IMZB-LABEL: sdiv16_constant_sub_srai:
@@ -892,11 +892,11 @@ define i16 @sdiv16_constant_sub_srai(i16 %a) nounwind {
 ; RV64IMZB-NEXT:    addiw a2, a2, 1911
 ; RV64IMZB-NEXT:    mul a1, a1, a2
 ; RV64IMZB-NEXT:    srli a1, a1, 16
-; RV64IMZB-NEXT:    subw a0, a1, a0
-; RV64IMZB-NEXT:    slli a0, a0, 48
-; RV64IMZB-NEXT:    srli a1, a0, 63
-; RV64IMZB-NEXT:    srai a0, a0, 51
-; RV64IMZB-NEXT:    add a0, a0, a1
+; RV64IMZB-NEXT:    subw a1, a1, a0
+; RV64IMZB-NEXT:    slli a1, a1, 48
+; RV64IMZB-NEXT:    srli a0, a1, 63
+; RV64IMZB-NEXT:    srai a1, a1, 51
+; RV64IMZB-NEXT:    add a0, a1, a0
 ; RV64IMZB-NEXT:    ret
   %1 = sdiv i16 %a, -15
   ret i16 %1

diff --git a/llvm/test/CodeGen/RISCV/div-pow2.ll b/llvm/test/CodeGen/RISCV/div-pow2.ll
index 736645baecea..454061c93eae 100644
--- a/llvm/test/CodeGen/RISCV/div-pow2.ll
+++ b/llvm/test/CodeGen/RISCV/div-pow2.ll
@@ -209,9 +209,9 @@ define i64 @sdiv64_pow2_negative_2(i64 %a) {
 ; RV32I-NEXT:    sltu a0, a2, a0
 ; RV32I-NEXT:    add a1, a1, a0
 ; RV32I-NEXT:    slli a0, a1, 31
-; RV32I-NEXT:    or a2, a3, a0
-; RV32I-NEXT:    neg a0, a2
-; RV32I-NEXT:    snez a2, a2
+; RV32I-NEXT:    or a3, a3, a0
+; RV32I-NEXT:    neg a0, a3
+; RV32I-NEXT:    snez a2, a3
 ; RV32I-NEXT:    srai a1, a1, 1
 ; RV32I-NEXT:    add a1, a1, a2
 ; RV32I-NEXT:    neg a1, a1
@@ -265,9 +265,9 @@ define i64 @sdiv64_pow2_negative_2048(i64 %a) {
 ; RV32I-NEXT:    sltu a0, a2, a0
 ; RV32I-NEXT:    add a1, a1, a0
 ; RV32I-NEXT:    slli a0, a1, 21
-; RV32I-NEXT:    or a2, a3, a0
-; RV32I-NEXT:    neg a0, a2
-; RV32I-NEXT:    snez a2, a2
+; RV32I-NEXT:    or a3, a3, a0
+; RV32I-NEXT:    neg a0, a3
+; RV32I-NEXT:    snez a2, a3
 ; RV32I-NEXT:    srai a1, a1, 11
 ; RV32I-NEXT:    add a1, a1, a2
 ; RV32I-NEXT:    neg a1, a1
@@ -322,9 +322,9 @@ define i64 @sdiv64_pow2_negative_4096(i64 %a) {
 ; RV32I-NEXT:    sltu a0, a2, a0
 ; RV32I-NEXT:    add a1, a1, a0
 ; RV32I-NEXT:    slli a0, a1, 20
-; RV32I-NEXT:    or a2, a3, a0
-; RV32I-NEXT:    neg a0, a2
-; RV32I-NEXT:    snez a2, a2
+; RV32I-NEXT:    or a3, a3, a0
+; RV32I-NEXT:    neg a0, a3
+; RV32I-NEXT:    snez a2, a3
 ; RV32I-NEXT:    srai a1, a1, 12
 ; RV32I-NEXT:    add a1, a1, a2
 ; RV32I-NEXT:    neg a1, a1
@@ -379,9 +379,9 @@ define i64 @sdiv64_pow2_negative_65536(i64 %a) {
 ; RV32I-NEXT:    sltu a0, a2, a0
 ; RV32I-NEXT:    add a1, a1, a0
 ; RV32I-NEXT:    slli a0, a1, 16
-; RV32I-NEXT:    or a2, a3, a0
-; RV32I-NEXT:    neg a0, a2
-; RV32I-NEXT:    snez a2, a2
+; RV32I-NEXT:    or a3, a3, a0
+; RV32I-NEXT:    neg a0, a3
+; RV32I-NEXT:    snez a2, a3
 ; RV32I-NEXT:    srai a1, a1, 16
 ; RV32I-NEXT:    add a1, a1, a2
 ; RV32I-NEXT:    neg a1, a1

diff  --git a/llvm/test/CodeGen/RISCV/div.ll b/llvm/test/CodeGen/RISCV/div.ll
index cd7fc00fbb36..7816129735b7 100644
--- a/llvm/test/CodeGen/RISCV/div.ll
+++ b/llvm/test/CodeGen/RISCV/div.ll
@@ -197,8 +197,8 @@ define i64 @udiv64_constant(i64 %a) nounwind {
 ; RV32IM-NEXT:    mulhu a6, a5, a4
 ; RV32IM-NEXT:    add a3, a6, a3
 ; RV32IM-NEXT:    sltu a0, a0, a2
-; RV32IM-NEXT:    sub a0, a1, a0
-; RV32IM-NEXT:    mul a1, a0, a4
+; RV32IM-NEXT:    sub a1, a1, a0
+; RV32IM-NEXT:    mul a1, a1, a4
 ; RV32IM-NEXT:    add a1, a3, a1
 ; RV32IM-NEXT:    mul a0, a5, a4
 ; RV32IM-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/double-arith-strict.ll b/llvm/test/CodeGen/RISCV/double-arith-strict.ll
index eb9721a104b4..7bd315439951 100644
--- a/llvm/test/CodeGen/RISCV/double-arith-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith-strict.ll
@@ -293,8 +293,8 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv a4, a0
-; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    xor a5, a1, a0
+; RV32I-NEXT:    lui a5, 524288
+; RV32I-NEXT:    xor a5, a1, a5
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a2, s1
@@ -378,9 +378,9 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv a4, a0
-; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    xor a2, s5, a0
-; RV32I-NEXT:    xor a5, a1, a0
+; RV32I-NEXT:    lui a5, 524288
+; RV32I-NEXT:    xor a2, s5, a5
+; RV32I-NEXT:    xor a5, a1, a5
 ; RV32I-NEXT:    mv a0, s4
 ; RV32I-NEXT:    mv a1, a2
 ; RV32I-NEXT:    mv a2, s3
@@ -476,9 +476,9 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind strictfp {
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv a4, a0
-; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    xor a3, s5, a0
-; RV32I-NEXT:    xor a5, a1, a0
+; RV32I-NEXT:    lui a5, 524288
+; RV32I-NEXT:    xor a3, s5, a5
+; RV32I-NEXT:    xor a5, a1, a5
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a2, s4
@@ -634,8 +634,8 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind strictfp {
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv a2, a0
-; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    xor a3, a1, a0
+; RV32I-NEXT:    lui a3, 524288
+; RV32I-NEXT:    xor a3, a1, a3
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a4, s1

diff  --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll
index f7c4b48b7215..f2d94460d2dd 100644
--- a/llvm/test/CodeGen/RISCV/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith.ll
@@ -196,8 +196,8 @@ define i32 @fneg_d(double %a, double %b) nounwind {
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    mv a3, a1
 ; RV32I-NEXT:    call __adddf3@plt
-; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    xor a3, a1, a2
+; RV32I-NEXT:    lui a3, 524288
+; RV32I-NEXT:    xor a3, a1, a3
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    call __eqdf2@plt
 ; RV32I-NEXT:    seqz a0, a0
@@ -423,8 +423,8 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv a4, a0
-; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    xor a5, a1, a0
+; RV32I-NEXT:    lui a5, 524288
+; RV32I-NEXT:    xor a5, a1, a5
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a2, s1
@@ -508,9 +508,9 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv a4, a0
-; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    xor a2, s5, a0
-; RV32I-NEXT:    xor a5, a1, a0
+; RV32I-NEXT:    lui a5, 524288
+; RV32I-NEXT:    xor a2, s5, a5
+; RV32I-NEXT:    xor a5, a1, a5
 ; RV32I-NEXT:    mv a0, s4
 ; RV32I-NEXT:    mv a1, a2
 ; RV32I-NEXT:    mv a2, s3
@@ -606,9 +606,9 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv a4, a0
-; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    xor a3, s5, a0
-; RV32I-NEXT:    xor a5, a1, a0
+; RV32I-NEXT:    lui a5, 524288
+; RV32I-NEXT:    xor a3, s5, a5
+; RV32I-NEXT:    xor a5, a1, a5
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a2, s4
@@ -832,8 +832,8 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv a2, a0
-; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    xor a3, a1, a0
+; RV32I-NEXT:    lui a3, 524288
+; RV32I-NEXT:    xor a3, a1, a3
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a4, s1

diff  --git a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
index 3aa6d54332bc..e6cf098eae57 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
@@ -293,8 +293,8 @@ define i32 @fcmp_ueq(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    frflags a0
 ; CHECKIFD-NEXT:    flt.d a2, fa1, fa0
 ; CHECKIFD-NEXT:    fsflags a0
-; CHECKIFD-NEXT:    or a0, a2, a1
-; CHECKIFD-NEXT:    xori a0, a0, 1
+; CHECKIFD-NEXT:    or a1, a2, a1
+; CHECKIFD-NEXT:    xori a0, a1, 1
 ; CHECKIFD-NEXT:    feq.d zero, fa1, fa0
 ; CHECKIFD-NEXT:    ret
 ;

diff  --git a/llvm/test/CodeGen/RISCV/float-arith-strict.ll b/llvm/test/CodeGen/RISCV/float-arith-strict.ll
index 89226ceab9e1..80158daea446 100644
--- a/llvm/test/CodeGen/RISCV/float-arith-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-arith-strict.ll
@@ -279,8 +279,8 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind strictfp {
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    xor a2, a0, a1
+; RV32I-NEXT:    lui a2, 524288
+; RV32I-NEXT:    xor a2, a0, a2
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    call fmaf@plt
@@ -301,8 +301,8 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind strictfp {
 ; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    xor a2, a0, a1
+; RV64I-NEXT:    lui a2, 524288
+; RV64I-NEXT:    xor a2, a0, a2
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
 ; RV64I-NEXT:    call fmaf@plt

diff  --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll
index 4655d46686f3..63fcfcfaa009 100644
--- a/llvm/test/CodeGen/RISCV/float-arith.ll
+++ b/llvm/test/CodeGen/RISCV/float-arith.ll
@@ -416,8 +416,8 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    xor a2, a0, a1
+; RV32I-NEXT:    lui a2, 524288
+; RV32I-NEXT:    xor a2, a0, a2
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    call fmaf@plt
@@ -438,8 +438,8 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    xor a2, a0, a1
+; RV64I-NEXT:    lui a2, 524288
+; RV64I-NEXT:    xor a2, a0, a2
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
 ; RV64I-NEXT:    call fmaf@plt

diff  --git a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
index 72e0d578be1f..f1ce0ccece51 100644
--- a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
@@ -195,8 +195,8 @@ define double @bitcast_double_xor(double %a1, double %a2) nounwind {
 ; RV32F-NEXT:    mv s1, a0
 ; RV32F-NEXT:    call __muldf3@plt
 ; RV32F-NEXT:    mv a2, a0
-; RV32F-NEXT:    lui a0, 524288
-; RV32F-NEXT:    xor a3, a1, a0
+; RV32F-NEXT:    lui a3, 524288
+; RV32F-NEXT:    xor a3, a1, a3
 ; RV32F-NEXT:    mv a0, s1
 ; RV32F-NEXT:    mv a1, s0
 ; RV32F-NEXT:    call __muldf3@plt
@@ -321,8 +321,8 @@ define double @bitcast_double_or(double %a1, double %a2) nounwind {
 ; RV32F-NEXT:    mv s1, a0
 ; RV32F-NEXT:    call __muldf3@plt
 ; RV32F-NEXT:    mv a2, a0
-; RV32F-NEXT:    lui a0, 524288
-; RV32F-NEXT:    or a3, a1, a0
+; RV32F-NEXT:    lui a3, 524288
+; RV32F-NEXT:    or a3, a1, a3
 ; RV32F-NEXT:    mv a0, s1
 ; RV32F-NEXT:    mv a1, s0
 ; RV32F-NEXT:    call __muldf3@plt

diff  --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index 0b72c8716ff1..92b79f8f1d22 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -780,8 +780,8 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    call __gtsf2@plt
 ; RV32I-NEXT:    sgtz a0, a0
-; RV32I-NEXT:    neg a0, a0
-; RV32I-NEXT:    or a1, a0, s1
+; RV32I-NEXT:    neg a1, a0
+; RV32I-NEXT:    or a1, a1, s1
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload

diff  --git a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
index de9ed5e25821..2fda42008d45 100644
--- a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
@@ -285,8 +285,8 @@ define i32 @fcmp_ueq(float %a, float %b) nounwind strictfp {
 ; CHECKIF-NEXT:    frflags a0
 ; CHECKIF-NEXT:    flt.s a2, fa1, fa0
 ; CHECKIF-NEXT:    fsflags a0
-; CHECKIF-NEXT:    or a0, a2, a1
-; CHECKIF-NEXT:    xori a0, a0, 1
+; CHECKIF-NEXT:    or a1, a2, a1
+; CHECKIF-NEXT:    xori a0, a1, 1
 ; CHECKIF-NEXT:    feq.s zero, fa1, fa0
 ; CHECKIF-NEXT:    ret
 ;

diff  --git a/llvm/test/CodeGen/RISCV/forced-atomics.ll b/llvm/test/CodeGen/RISCV/forced-atomics.ll
index 6eda04bfa0c6..4c8176419f72 100644
--- a/llvm/test/CodeGen/RISCV/forced-atomics.ll
+++ b/llvm/test/CodeGen/RISCV/forced-atomics.ll
@@ -2504,8 +2504,8 @@ define i64 @rmw64_max_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    j .LBB49_2
 ; RV32-NEXT:  .LBB49_1: # %atomicrmw.start
 ; RV32-NEXT:    # in Loop: Header=BB49_2 Depth=1
-; RV32-NEXT:    neg a0, a0
-; RV32-NEXT:    and a3, a0, a1
+; RV32-NEXT:    neg a3, a0
+; RV32-NEXT:    and a3, a3, a1
 ; RV32-NEXT:    sw a4, 0(sp)
 ; RV32-NEXT:    sw a1, 4(sp)
 ; RV32-NEXT:    mv a1, sp
@@ -2599,8 +2599,8 @@ define i64 @rmw64_min_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    j .LBB50_2
 ; RV32-NEXT:  .LBB50_1: # %atomicrmw.start
 ; RV32-NEXT:    # in Loop: Header=BB50_2 Depth=1
-; RV32-NEXT:    neg a0, a0
-; RV32-NEXT:    and a3, a0, a1
+; RV32-NEXT:    neg a3, a0
+; RV32-NEXT:    and a3, a3, a1
 ; RV32-NEXT:    sw a4, 0(sp)
 ; RV32-NEXT:    sw a1, 4(sp)
 ; RV32-NEXT:    mv a1, sp
@@ -2696,8 +2696,8 @@ define i64 @rmw64_umax_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    j .LBB51_2
 ; RV32-NEXT:  .LBB51_1: # %atomicrmw.start
 ; RV32-NEXT:    # in Loop: Header=BB51_2 Depth=1
-; RV32-NEXT:    neg a0, a0
-; RV32-NEXT:    and a3, a0, a1
+; RV32-NEXT:    neg a3, a0
+; RV32-NEXT:    and a3, a3, a1
 ; RV32-NEXT:    sw a4, 0(sp)
 ; RV32-NEXT:    sw a1, 4(sp)
 ; RV32-NEXT:    mv a1, sp
@@ -2791,8 +2791,8 @@ define i64 @rmw64_umin_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    j .LBB52_2
 ; RV32-NEXT:  .LBB52_1: # %atomicrmw.start
 ; RV32-NEXT:    # in Loop: Header=BB52_2 Depth=1
-; RV32-NEXT:    neg a0, a0
-; RV32-NEXT:    and a3, a0, a1
+; RV32-NEXT:    neg a3, a0
+; RV32-NEXT:    and a3, a3, a1
 ; RV32-NEXT:    sw a4, 0(sp)
 ; RV32-NEXT:    sw a1, 4(sp)
 ; RV32-NEXT:    mv a1, sp

diff  --git a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
index fc28e156801d..92afb4c73048 100644
--- a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
@@ -2369,20 +2369,20 @@ define <2 x i64> @ustest_f64i64(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    beqz a1, .LBB20_7
 ; CHECK-NOV-NEXT:  # %bb.5: # %entry
 ; CHECK-NOV-NEXT:    sgtz a1, a1
-; CHECK-NOV-NEXT:    and a0, a4, s0
+; CHECK-NOV-NEXT:    and a4, a4, s0
 ; CHECK-NOV-NEXT:    bnez a2, .LBB20_8
 ; CHECK-NOV-NEXT:  .LBB20_6:
-; CHECK-NOV-NEXT:    snez a2, a0
+; CHECK-NOV-NEXT:    snez a0, a4
 ; CHECK-NOV-NEXT:    j .LBB20_9
 ; CHECK-NOV-NEXT:  .LBB20_7:
 ; CHECK-NOV-NEXT:    snez a1, a3
-; CHECK-NOV-NEXT:    and a0, a4, s0
+; CHECK-NOV-NEXT:    and a4, a4, s0
 ; CHECK-NOV-NEXT:    beqz a2, .LBB20_6
 ; CHECK-NOV-NEXT:  .LBB20_8: # %entry
-; CHECK-NOV-NEXT:    sgtz a2, a2
+; CHECK-NOV-NEXT:    sgtz a0, a2
 ; CHECK-NOV-NEXT:  .LBB20_9: # %entry
-; CHECK-NOV-NEXT:    neg a2, a2
-; CHECK-NOV-NEXT:    and a0, a2, a0
+; CHECK-NOV-NEXT:    neg a0, a0
+; CHECK-NOV-NEXT:    and a0, a0, a4
 ; CHECK-NOV-NEXT:    neg a1, a1
 ; CHECK-NOV-NEXT:    and a1, a1, a3
 ; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -2435,20 +2435,20 @@ define <2 x i64> @ustest_f64i64(<2 x double> %x) {
 ; CHECK-V-NEXT:    beqz a2, .LBB20_7
 ; CHECK-V-NEXT:  # %bb.5: # %entry
 ; CHECK-V-NEXT:    sgtz a1, a2
-; CHECK-V-NEXT:    and a2, a3, s1
+; CHECK-V-NEXT:    and a3, a3, s1
 ; CHECK-V-NEXT:    bnez s0, .LBB20_8
 ; CHECK-V-NEXT:  .LBB20_6:
-; CHECK-V-NEXT:    snez a3, a2
+; CHECK-V-NEXT:    snez a2, a3
 ; CHECK-V-NEXT:    j .LBB20_9
 ; CHECK-V-NEXT:  .LBB20_7:
 ; CHECK-V-NEXT:    snez a1, a0
-; CHECK-V-NEXT:    and a2, a3, s1
+; CHECK-V-NEXT:    and a3, a3, s1
 ; CHECK-V-NEXT:    beqz s0, .LBB20_6
 ; CHECK-V-NEXT:  .LBB20_8: # %entry
-; CHECK-V-NEXT:    sgtz a3, s0
+; CHECK-V-NEXT:    sgtz a2, s0
 ; CHECK-V-NEXT:  .LBB20_9: # %entry
-; CHECK-V-NEXT:    neg a3, a3
-; CHECK-V-NEXT:    and a2, a3, a2
+; CHECK-V-NEXT:    neg a2, a2
+; CHECK-V-NEXT:    and a2, a2, a3
 ; CHECK-V-NEXT:    neg a1, a1
 ; CHECK-V-NEXT:    and a0, a1, a0
 ; CHECK-V-NEXT:    sd a0, 24(sp)
@@ -2789,20 +2789,20 @@ define <2 x i64> @ustest_f32i64(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    beqz a1, .LBB23_7
 ; CHECK-NOV-NEXT:  # %bb.5: # %entry
 ; CHECK-NOV-NEXT:    sgtz a1, a1
-; CHECK-NOV-NEXT:    and a0, a4, s0
+; CHECK-NOV-NEXT:    and a4, a4, s0
 ; CHECK-NOV-NEXT:    bnez a2, .LBB23_8
 ; CHECK-NOV-NEXT:  .LBB23_6:
-; CHECK-NOV-NEXT:    snez a2, a0
+; CHECK-NOV-NEXT:    snez a0, a4
 ; CHECK-NOV-NEXT:    j .LBB23_9
 ; CHECK-NOV-NEXT:  .LBB23_7:
 ; CHECK-NOV-NEXT:    snez a1, a3
-; CHECK-NOV-NEXT:    and a0, a4, s0
+; CHECK-NOV-NEXT:    and a4, a4, s0
 ; CHECK-NOV-NEXT:    beqz a2, .LBB23_6
 ; CHECK-NOV-NEXT:  .LBB23_8: # %entry
-; CHECK-NOV-NEXT:    sgtz a2, a2
+; CHECK-NOV-NEXT:    sgtz a0, a2
 ; CHECK-NOV-NEXT:  .LBB23_9: # %entry
-; CHECK-NOV-NEXT:    neg a2, a2
-; CHECK-NOV-NEXT:    and a0, a2, a0
+; CHECK-NOV-NEXT:    neg a0, a0
+; CHECK-NOV-NEXT:    and a0, a0, a4
 ; CHECK-NOV-NEXT:    neg a1, a1
 ; CHECK-NOV-NEXT:    and a1, a1, a3
 ; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -2855,20 +2855,20 @@ define <2 x i64> @ustest_f32i64(<2 x float> %x) {
 ; CHECK-V-NEXT:    beqz a2, .LBB23_7
 ; CHECK-V-NEXT:  # %bb.5: # %entry
 ; CHECK-V-NEXT:    sgtz a1, a2
-; CHECK-V-NEXT:    and a2, a3, s1
+; CHECK-V-NEXT:    and a3, a3, s1
 ; CHECK-V-NEXT:    bnez s0, .LBB23_8
 ; CHECK-V-NEXT:  .LBB23_6:
-; CHECK-V-NEXT:    snez a3, a2
+; CHECK-V-NEXT:    snez a2, a3
 ; CHECK-V-NEXT:    j .LBB23_9
 ; CHECK-V-NEXT:  .LBB23_7:
 ; CHECK-V-NEXT:    snez a1, a0
-; CHECK-V-NEXT:    and a2, a3, s1
+; CHECK-V-NEXT:    and a3, a3, s1
 ; CHECK-V-NEXT:    beqz s0, .LBB23_6
 ; CHECK-V-NEXT:  .LBB23_8: # %entry
-; CHECK-V-NEXT:    sgtz a3, s0
+; CHECK-V-NEXT:    sgtz a2, s0
 ; CHECK-V-NEXT:  .LBB23_9: # %entry
-; CHECK-V-NEXT:    neg a3, a3
-; CHECK-V-NEXT:    and a2, a3, a2
+; CHECK-V-NEXT:    neg a2, a2
+; CHECK-V-NEXT:    and a2, a2, a3
 ; CHECK-V-NEXT:    neg a1, a1
 ; CHECK-V-NEXT:    and a0, a1, a0
 ; CHECK-V-NEXT:    sd a0, 24(sp)
@@ -3198,20 +3198,20 @@ define <2 x i64> @ustest_f16i64(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    beqz a1, .LBB26_7
 ; CHECK-NOV-NEXT:  # %bb.5: # %entry
 ; CHECK-NOV-NEXT:    sgtz a1, a1
-; CHECK-NOV-NEXT:    and a0, a4, s0
+; CHECK-NOV-NEXT:    and a4, a4, s0
 ; CHECK-NOV-NEXT:    bnez a2, .LBB26_8
 ; CHECK-NOV-NEXT:  .LBB26_6:
-; CHECK-NOV-NEXT:    snez a2, a0
+; CHECK-NOV-NEXT:    snez a0, a4
 ; CHECK-NOV-NEXT:    j .LBB26_9
 ; CHECK-NOV-NEXT:  .LBB26_7:
 ; CHECK-NOV-NEXT:    snez a1, a3
-; CHECK-NOV-NEXT:    and a0, a4, s0
+; CHECK-NOV-NEXT:    and a4, a4, s0
 ; CHECK-NOV-NEXT:    beqz a2, .LBB26_6
 ; CHECK-NOV-NEXT:  .LBB26_8: # %entry
-; CHECK-NOV-NEXT:    sgtz a2, a2
+; CHECK-NOV-NEXT:    sgtz a0, a2
 ; CHECK-NOV-NEXT:  .LBB26_9: # %entry
-; CHECK-NOV-NEXT:    neg a2, a2
-; CHECK-NOV-NEXT:    and a0, a2, a0
+; CHECK-NOV-NEXT:    neg a0, a0
+; CHECK-NOV-NEXT:    and a0, a0, a4
 ; CHECK-NOV-NEXT:    neg a1, a1
 ; CHECK-NOV-NEXT:    and a1, a1, a3
 ; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -5363,8 +5363,8 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
 ; CHECK-NOV-NEXT:  .LBB45_7: # %entry
 ; CHECK-NOV-NEXT:    slti a6, a1, 0
 ; CHECK-NOV-NEXT:    slti a3, s1, 0
-; CHECK-NOV-NEXT:    neg a3, a3
-; CHECK-NOV-NEXT:    and a4, a3, s1
+; CHECK-NOV-NEXT:    neg a4, a3
+; CHECK-NOV-NEXT:    and a4, a4, s1
 ; CHECK-NOV-NEXT:    slli a3, a0, 63
 ; CHECK-NOV-NEXT:    mv a5, s0
 ; CHECK-NOV-NEXT:    bltz a4, .LBB45_20
@@ -5474,8 +5474,8 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
 ; CHECK-V-NEXT:    mv a0, a4
 ; CHECK-V-NEXT:  .LBB45_7: # %entry
 ; CHECK-V-NEXT:    slti a3, s1, 0
-; CHECK-V-NEXT:    neg a3, a3
-; CHECK-V-NEXT:    and a4, a3, s1
+; CHECK-V-NEXT:    neg a4, a3
+; CHECK-V-NEXT:    and a4, a4, s1
 ; CHECK-V-NEXT:    slti a6, a1, 0
 ; CHECK-V-NEXT:    slli a3, a2, 63
 ; CHECK-V-NEXT:    mv a5, s0
@@ -5850,8 +5850,8 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
 ; CHECK-NOV-NEXT:  .LBB48_7: # %entry
 ; CHECK-NOV-NEXT:    slti a6, a1, 0
 ; CHECK-NOV-NEXT:    slti a3, s1, 0
-; CHECK-NOV-NEXT:    neg a3, a3
-; CHECK-NOV-NEXT:    and a4, a3, s1
+; CHECK-NOV-NEXT:    neg a4, a3
+; CHECK-NOV-NEXT:    and a4, a4, s1
 ; CHECK-NOV-NEXT:    slli a3, a0, 63
 ; CHECK-NOV-NEXT:    mv a5, s0
 ; CHECK-NOV-NEXT:    bltz a4, .LBB48_20
@@ -5961,8 +5961,8 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
 ; CHECK-V-NEXT:    mv a0, a4
 ; CHECK-V-NEXT:  .LBB48_7: # %entry
 ; CHECK-V-NEXT:    slti a3, s1, 0
-; CHECK-V-NEXT:    neg a3, a3
-; CHECK-V-NEXT:    and a4, a3, s1
+; CHECK-V-NEXT:    neg a4, a3
+; CHECK-V-NEXT:    and a4, a4, s1
 ; CHECK-V-NEXT:    slti a6, a1, 0
 ; CHECK-V-NEXT:    slli a3, a2, 63
 ; CHECK-V-NEXT:    mv a5, s0
@@ -6339,8 +6339,8 @@ define <2 x i64> @stest_f16i64_mm(<2 x half> %x) {
 ; CHECK-NOV-NEXT:  .LBB51_7: # %entry
 ; CHECK-NOV-NEXT:    slti a6, a1, 0
 ; CHECK-NOV-NEXT:    slti a3, s1, 0
-; CHECK-NOV-NEXT:    neg a3, a3
-; CHECK-NOV-NEXT:    and a4, a3, s1
+; CHECK-NOV-NEXT:    neg a4, a3
+; CHECK-NOV-NEXT:    and a4, a4, s1
 ; CHECK-NOV-NEXT:    slli a3, a0, 63
 ; CHECK-NOV-NEXT:    mv a5, s0
 ; CHECK-NOV-NEXT:    bltz a4, .LBB51_20
@@ -6445,8 +6445,8 @@ define <2 x i64> @stest_f16i64_mm(<2 x half> %x) {
 ; CHECK-V-NEXT:  .LBB51_7: # %entry
 ; CHECK-V-NEXT:    slti a6, a1, 0
 ; CHECK-V-NEXT:    slti a3, s1, 0
-; CHECK-V-NEXT:    neg a3, a3
-; CHECK-V-NEXT:    and a4, a3, s1
+; CHECK-V-NEXT:    neg a4, a3
+; CHECK-V-NEXT:    and a4, a4, s1
 ; CHECK-V-NEXT:    slli a3, a2, 63
 ; CHECK-V-NEXT:    mv a5, s0
 ; CHECK-V-NEXT:    bltz a4, .LBB51_20

diff  --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index 9ef663c62936..760a97653582 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -1217,8 +1217,8 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    call __gesf2@plt
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    or a1, s1, a0
+; RV32I-NEXT:    and a1, a0, s2
+; RV32I-NEXT:    or a1, s1, a1
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload

diff  --git a/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll
index f048a9c67761..772f4a98f666 100644
--- a/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll
+++ b/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll
@@ -111,8 +111,8 @@ define i32 @fcmp_ueq(half %a, half %b) nounwind strictfp {
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    flt.h a2, fa1, fa0
 ; CHECK-NEXT:    fsflags a0
-; CHECK-NEXT:    or a0, a2, a1
-; CHECK-NEXT:    xori a0, a0, 1
+; CHECK-NEXT:    or a1, a2, a1
+; CHECK-NEXT:    xori a0, a1, 1
 ; CHECK-NEXT:    feq.h zero, fa1, fa0
 ; CHECK-NEXT:    ret
   %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp

diff  --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
index fbaa0040c3f6..ee060201afa3 100644
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -1149,17 +1149,17 @@ define i128 @muli128_m3840(i128 %a) nounwind {
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sltu t1, a5, a3
 ; RV32I-NEXT:  .LBB30_2:
-; RV32I-NEXT:    sub a1, a2, a1
-; RV32I-NEXT:    sltu a2, a1, t1
-; RV32I-NEXT:    sub a2, t0, a2
-; RV32I-NEXT:    sub a1, a1, t1
-; RV32I-NEXT:    sub a3, a5, a3
-; RV32I-NEXT:    sub a3, a3, a7
+; RV32I-NEXT:    sub a2, a2, a1
+; RV32I-NEXT:    sltu a1, a2, t1
+; RV32I-NEXT:    sub a1, t0, a1
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sub a5, a5, a3
+; RV32I-NEXT:    sub a3, a5, a7
 ; RV32I-NEXT:    sub a4, a6, a4
 ; RV32I-NEXT:    sw a4, 0(a0)
 ; RV32I-NEXT:    sw a3, 4(a0)
-; RV32I-NEXT:    sw a1, 8(a0)
-; RV32I-NEXT:    sw a2, 12(a0)
+; RV32I-NEXT:    sw a2, 8(a0)
+; RV32I-NEXT:    sw a1, 12(a0)
 ; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: muli128_m3840:
@@ -1203,16 +1203,16 @@ define i128 @muli128_m3840(i128 %a) nounwind {
 ; RV32IM-NEXT:    sub a3, t1, a3
 ; RV32IM-NEXT:    add a2, a3, a2
 ; RV32IM-NEXT:    sub a3, t3, a4
-; RV32IM-NEXT:    sub a1, a3, a1
-; RV32IM-NEXT:    add a1, a1, a2
-; RV32IM-NEXT:    add a1, a1, t0
-; RV32IM-NEXT:    add a1, a7, a1
-; RV32IM-NEXT:    add a1, a1, s0
-; RV32IM-NEXT:    mul a2, a4, a5
-; RV32IM-NEXT:    sw a2, 0(a0)
+; RV32IM-NEXT:    sub a3, a3, a1
+; RV32IM-NEXT:    add a2, a3, a2
+; RV32IM-NEXT:    add a2, a2, t0
+; RV32IM-NEXT:    add a2, a7, a2
+; RV32IM-NEXT:    add a2, a2, s0
+; RV32IM-NEXT:    mul a1, a4, a5
+; RV32IM-NEXT:    sw a1, 0(a0)
 ; RV32IM-NEXT:    sw a6, 4(a0)
 ; RV32IM-NEXT:    sw t6, 8(a0)
-; RV32IM-NEXT:    sw a1, 12(a0)
+; RV32IM-NEXT:    sw a2, 12(a0)
 ; RV32IM-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
@@ -1280,8 +1280,8 @@ define i128 @muli128_m63(i128 %a) nounwind {
 ; RV32I-NEXT:    sub a7, t2, t0
 ; RV32I-NEXT:    sub a3, a3, a6
 ; RV32I-NEXT:    sub a3, a3, a4
-; RV32I-NEXT:    sub a1, a2, a1
-; RV32I-NEXT:    sw a1, 0(a0)
+; RV32I-NEXT:    sub a2, a2, a1
+; RV32I-NEXT:    sw a2, 0(a0)
 ; RV32I-NEXT:    sw a3, 4(a0)
 ; RV32I-NEXT:    sw a7, 8(a0)
 ; RV32I-NEXT:    sw a5, 12(a0)
@@ -1327,19 +1327,19 @@ define i128 @muli128_m63(i128 %a) nounwind {
 ; RV32IM-NEXT:    slli t1, a2, 6
 ; RV32IM-NEXT:    sub a2, a2, t1
 ; RV32IM-NEXT:    mulhu a5, a1, a5
-; RV32IM-NEXT:    sub a1, a5, a1
+; RV32IM-NEXT:    sub a5, a5, a1
+; RV32IM-NEXT:    add a2, a5, a2
+; RV32IM-NEXT:    sub a1, t3, a3
+; RV32IM-NEXT:    sub a1, a1, a4
 ; RV32IM-NEXT:    add a1, a1, a2
-; RV32IM-NEXT:    sub a2, t3, a3
-; RV32IM-NEXT:    sub a2, a2, a4
-; RV32IM-NEXT:    add a1, a2, a1
 ; RV32IM-NEXT:    neg a2, t5
 ; RV32IM-NEXT:    sltu a2, a2, t0
 ; RV32IM-NEXT:    add a1, a1, a2
 ; RV32IM-NEXT:    add a1, a7, a1
 ; RV32IM-NEXT:    add a1, a1, s0
 ; RV32IM-NEXT:    slli a2, a3, 6
-; RV32IM-NEXT:    sub a2, a3, a2
-; RV32IM-NEXT:    sw a2, 0(a0)
+; RV32IM-NEXT:    sub a3, a3, a2
+; RV32IM-NEXT:    sw a3, 0(a0)
 ; RV32IM-NEXT:    sw a6, 4(a0)
 ; RV32IM-NEXT:    sw t6, 8(a0)
 ; RV32IM-NEXT:    sw a1, 12(a0)
@@ -1594,29 +1594,29 @@ define i8 @muladd_demand_2(i8 %x, i8 %y) nounwind {
 ; RV32I-LABEL: muladd_demand_2:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a0, a0, 1
-; RV32I-NEXT:    sub a0, a1, a0
-; RV32I-NEXT:    ori a0, a0, -16
+; RV32I-NEXT:    sub a1, a1, a0
+; RV32I-NEXT:    ori a0, a1, -16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: muladd_demand_2:
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    slli a0, a0, 1
-; RV32IM-NEXT:    sub a0, a1, a0
-; RV32IM-NEXT:    ori a0, a0, -16
+; RV32IM-NEXT:    sub a1, a1, a0
+; RV32IM-NEXT:    ori a0, a1, -16
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: muladd_demand_2:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slliw a0, a0, 1
-; RV64I-NEXT:    subw a0, a1, a0
-; RV64I-NEXT:    ori a0, a0, -16
+; RV64I-NEXT:    subw a1, a1, a0
+; RV64I-NEXT:    ori a0, a1, -16
 ; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: muladd_demand_2:
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    slliw a0, a0, 1
-; RV64IM-NEXT:    subw a0, a1, a0
-; RV64IM-NEXT:    ori a0, a0, -16
+; RV64IM-NEXT:    subw a1, a1, a0
+; RV64IM-NEXT:    ori a0, a1, -16
 ; RV64IM-NEXT:    ret
   %m = mul i8 %x, 14
   %a = add i8 %y, %m

diff  --git a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
index 2671d5023b4f..261aece9e8a7 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
@@ -306,9 +306,9 @@ define i64 @not_shl_one_i64(i64 %x) {
 ; CHECK-NEXT:    and a2, a4, a2
 ; CHECK-NEXT:    sll a0, a1, a0
 ; CHECK-NEXT:    addi a3, a3, -1
-; CHECK-NEXT:    and a1, a3, a0
+; CHECK-NEXT:    and a3, a3, a0
 ; CHECK-NEXT:    not a0, a2
-; CHECK-NEXT:    not a1, a1
+; CHECK-NEXT:    not a1, a3
 ; CHECK-NEXT:    ret
   %1 = shl i64 1, %x
   %2 = xor i64 %1, -1

diff  --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
index 55d571d25ab4..5dc6bf8001f5 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -327,13 +327,13 @@ define i64 @ctpop_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a2, 349525
 ; RV32I-NEXT:    addi s2, a2, 1365
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    sub a0, a1, a0
-; RV32I-NEXT:    lui a1, 209715
-; RV32I-NEXT:    addi s3, a1, 819
-; RV32I-NEXT:    and a1, a0, s3
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    sub a1, a1, a0
+; RV32I-NEXT:    lui a0, 209715
+; RV32I-NEXT:    addi s3, a0, 819
+; RV32I-NEXT:    and a0, a1, s3
+; RV32I-NEXT:    srli a1, a1, 2
+; RV32I-NEXT:    and a1, a1, s3
+; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    srli a1, a0, 4
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    lui a1, 61681
@@ -346,11 +346,11 @@ define i64 @ctpop_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    srli s5, a0, 24
 ; RV32I-NEXT:    srli a0, s0, 1
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    sub a0, s0, a0
-; RV32I-NEXT:    and a1, a0, s3
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    sub s0, s0, a0
+; RV32I-NEXT:    and a0, s0, s3
+; RV32I-NEXT:    srli s0, s0, 2
+; RV32I-NEXT:    and a1, s0, s3
+; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    srli a1, a0, 4
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    and a0, a0, s4

diff  --git a/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll b/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll
index 55620af70215..02dc155f41fe 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll
@@ -134,9 +134,9 @@ define signext i32 @test10(i32* %0, i64 %1) {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lui a2, 30141
 ; RV64I-NEXT:    addiw a2, a2, -747
-; RV64I-NEXT:    subw a1, a2, a1
-; RV64I-NEXT:    slli a1, a1, 2
-; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    subw a2, a2, a1
+; RV64I-NEXT:    slli a2, a2, 2
+; RV64I-NEXT:    add a0, a0, a2
 ; RV64I-NEXT:    lw a0, 0(a0)
 ; RV64I-NEXT:    ret
   %3 = mul i64 %1, -4294967296
@@ -179,8 +179,8 @@ define i8 @test13(i8* %0, i64 %1) {
 ; RV64I-NEXT:    add a2, a0, a2
 ; RV64I-NEXT:    lb a2, 0(a2)
 ; RV64I-NEXT:    li a3, 2
-; RV64I-NEXT:    subw a1, a3, a1
-; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    subw a3, a3, a1
+; RV64I-NEXT:    add a0, a0, a3
 ; RV64I-NEXT:    lb a0, 0(a0)
 ; RV64I-NEXT:    add a0, a2, a0
 ; RV64I-NEXT:    ret
@@ -201,11 +201,11 @@ define signext i32 @test14(i8* %0, i32* %1, i64 %2) {
 ; RV64I-LABEL: test14:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    li a3, 1
-; RV64I-NEXT:    subw a2, a3, a2
-; RV64I-NEXT:    add a0, a0, a2
+; RV64I-NEXT:    subw a3, a3, a2
+; RV64I-NEXT:    add a0, a0, a3
 ; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    slli a2, a2, 2
-; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    slli a3, a3, 2
+; RV64I-NEXT:    add a1, a1, a3
 ; RV64I-NEXT:    lw a1, 0(a1)
 ; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index 3193a4a7af0b..713f0c05c055 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -1018,11 +1018,11 @@ define i64 @bswap_i64(i64 %a) {
 ; RV64I-NEXT:    slli a5, a5, 24
 ; RV64I-NEXT:    or a3, a5, a3
 ; RV64I-NEXT:    or a1, a3, a1
-; RV64I-NEXT:    and a3, a0, a4
-; RV64I-NEXT:    slli a3, a3, 24
-; RV64I-NEXT:    srliw a4, a0, 24
-; RV64I-NEXT:    slli a4, a4, 32
-; RV64I-NEXT:    or a3, a3, a4
+; RV64I-NEXT:    and a4, a0, a4
+; RV64I-NEXT:    slli a4, a4, 24
+; RV64I-NEXT:    srliw a3, a0, 24
+; RV64I-NEXT:    slli a3, a3, 32
+; RV64I-NEXT:    or a3, a4, a3
 ; RV64I-NEXT:    and a2, a0, a2
 ; RV64I-NEXT:    slli a2, a2, 40
 ; RV64I-NEXT:    slli a0, a0, 56

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll
index b10255121d26..6f1f8ffae167 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll
@@ -847,8 +847,8 @@ define void @strided_load_startval_add_with_splat(i8* noalias nocapture %0, i8*
 ; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    beq a2, a3, .LBB13_7
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    li a3, 1023
-; CHECK-NEXT:    subw a4, a3, a2
+; CHECK-NEXT:    li a4, 1023
+; CHECK-NEXT:    subw a4, a4, a2
 ; CHECK-NEXT:    li a5, 31
 ; CHECK-NEXT:    mv a3, a2
 ; CHECK-NEXT:    bltu a4, a5, .LBB13_5

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
index 110da69feac9..0072cc9b54cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
@@ -650,8 +650,8 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
 ; CHECK-NEXT:    vle8.v v24, (a0)
 ; CHECK-NEXT:    addi a0, sp, 16
 ; CHECK-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT:    and a0, a4, a2
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    and a2, a4, a2
+; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
 ; CHECK-NEXT:    vmseq.vv v1, v16, v8, v0.t
 ; CHECK-NEXT:    bltu a3, a1, .LBB51_2
 ; CHECK-NEXT:  # %bb.1:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
index 99f66d7e463b..5b903198ee0a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
@@ -1328,8 +1328,8 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    addi a2, sp, 16
 ; CHECK-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT:    vl8re64.v v0, (a0)
-; CHECK-NEXT:    and a0, a5, a3
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    and a3, a5, a3
+; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add a0, sp, a0

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
index 088e9fbb786e..a18e4d5846d4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
@@ -1328,8 +1328,8 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    addi a2, sp, 16
 ; CHECK-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT:    vl8re64.v v0, (a0)
-; CHECK-NEXT:    and a0, a5, a3
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    and a3, a5, a3
+; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add a0, sp, a0

diff  --git a/llvm/test/CodeGen/RISCV/select-binop-identity.ll b/llvm/test/CodeGen/RISCV/select-binop-identity.ll
index 33f22e8abb5a..74fee7aed8b5 100644
--- a/llvm/test/CodeGen/RISCV/select-binop-identity.ll
+++ b/llvm/test/CodeGen/RISCV/select-binop-identity.ll
@@ -226,8 +226,8 @@ define i64 @sub_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
 ; RV32I-NEXT:    beqz a0, .LBB9_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sltu a0, a3, a1
-; RV32I-NEXT:    sub a2, a4, a2
-; RV32I-NEXT:    sub a4, a2, a0
+; RV32I-NEXT:    sub a4, a4, a2
+; RV32I-NEXT:    sub a4, a4, a0
 ; RV32I-NEXT:    sub a3, a3, a1
 ; RV32I-NEXT:  .LBB9_2:
 ; RV32I-NEXT:    mv a0, a3

diff  --git a/llvm/test/CodeGen/RISCV/setcc-logic.ll b/llvm/test/CodeGen/RISCV/setcc-logic.ll
index 3e822b88e6c6..6a6c1bcd8dec 100644
--- a/llvm/test/CodeGen/RISCV/setcc-logic.ll
+++ b/llvm/test/CodeGen/RISCV/setcc-logic.ll
@@ -8,16 +8,16 @@ define i1 @and_icmp_eq(i32 signext %a, i32 signext %b, i32 signext %c, i32 signe
 ; RV32I-LABEL: and_icmp_eq:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    xor a1, a2, a3
-; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    or a0, a0, a2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: and_icmp_eq:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    xor a1, a2, a3
-; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    or a0, a0, a2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ret
   %cmp1 = icmp eq i32 %a, %b
@@ -30,16 +30,16 @@ define i1 @or_icmp_ne(i32 signext %a, i32 signext %b, i32 signext %c, i32 signex
 ; RV32I-LABEL: or_icmp_ne:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    xor a1, a2, a3
-; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    or a0, a0, a2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: or_icmp_ne:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    xor a1, a2, a3
-; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    or a0, a0, a2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ret
   %cmp1 = icmp ne i32 %a, %b
@@ -299,8 +299,8 @@ define void @and_sge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-LABEL: and_sge_eq:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a0, a1
-; RV32I-NEXT:    xor a1, a2, a3
-; RV32I-NEXT:    snez a1, a1
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    snez a1, a2
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    bnez a0, .LBB13_2
 ; RV32I-NEXT:  # %bb.1:
@@ -311,8 +311,8 @@ define void @and_sge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-LABEL: and_sge_eq:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slt a0, a0, a1
-; RV64I-NEXT:    xor a1, a2, a3
-; RV64I-NEXT:    snez a1, a1
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    snez a1, a2
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    bnez a0, .LBB13_2
 ; RV64I-NEXT:  # %bb.1:
@@ -336,8 +336,8 @@ define void @and_sle_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-LABEL: and_sle_eq:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a1, a0
-; RV32I-NEXT:    xor a1, a2, a3
-; RV32I-NEXT:    snez a1, a1
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    snez a1, a2
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    bnez a0, .LBB14_2
 ; RV32I-NEXT:  # %bb.1:
@@ -348,8 +348,8 @@ define void @and_sle_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-LABEL: and_sle_eq:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slt a0, a1, a0
-; RV64I-NEXT:    xor a1, a2, a3
-; RV64I-NEXT:    snez a1, a1
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    snez a1, a2
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    bnez a0, .LBB14_2
 ; RV64I-NEXT:  # %bb.1:
@@ -373,8 +373,8 @@ define void @and_uge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-LABEL: and_uge_eq:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a0, a1
-; RV32I-NEXT:    xor a1, a2, a3
-; RV32I-NEXT:    snez a1, a1
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    snez a1, a2
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    bnez a0, .LBB15_2
 ; RV32I-NEXT:  # %bb.1:
@@ -385,8 +385,8 @@ define void @and_uge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-LABEL: and_uge_eq:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sltu a0, a0, a1
-; RV64I-NEXT:    xor a1, a2, a3
-; RV64I-NEXT:    snez a1, a1
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    snez a1, a2
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    bnez a0, .LBB15_2
 ; RV64I-NEXT:  # %bb.1:
@@ -410,8 +410,8 @@ define void @and_ule_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-LABEL: and_ule_eq:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a1, a0
-; RV32I-NEXT:    xor a1, a2, a3
-; RV32I-NEXT:    snez a1, a1
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    snez a1, a2
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    bnez a0, .LBB16_2
 ; RV32I-NEXT:  # %bb.1:
@@ -422,8 +422,8 @@ define void @and_ule_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-LABEL: and_ule_eq:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sltu a0, a1, a0
-; RV64I-NEXT:    xor a1, a2, a3
-; RV64I-NEXT:    snez a1, a1
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    snez a1, a2
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    bnez a0, .LBB16_2
 ; RV64I-NEXT:  # %bb.1:
@@ -447,8 +447,8 @@ define void @and_sge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-LABEL: and_sge_ne:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a0, a1
-; RV32I-NEXT:    xor a1, a2, a3
-; RV32I-NEXT:    seqz a1, a1
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    seqz a1, a2
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    bnez a0, .LBB17_2
 ; RV32I-NEXT:  # %bb.1:
@@ -459,8 +459,8 @@ define void @and_sge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-LABEL: and_sge_ne:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slt a0, a0, a1
-; RV64I-NEXT:    xor a1, a2, a3
-; RV64I-NEXT:    seqz a1, a1
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    seqz a1, a2
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    bnez a0, .LBB17_2
 ; RV64I-NEXT:  # %bb.1:
@@ -484,8 +484,8 @@ define void @and_sle_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-LABEL: and_sle_ne:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a1, a0
-; RV32I-NEXT:    xor a1, a2, a3
-; RV32I-NEXT:    seqz a1, a1
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    seqz a1, a2
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    bnez a0, .LBB18_2
 ; RV32I-NEXT:  # %bb.1:
@@ -496,8 +496,8 @@ define void @and_sle_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-LABEL: and_sle_ne:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slt a0, a1, a0
-; RV64I-NEXT:    xor a1, a2, a3
-; RV64I-NEXT:    seqz a1, a1
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    seqz a1, a2
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    bnez a0, .LBB18_2
 ; RV64I-NEXT:  # %bb.1:
@@ -521,8 +521,8 @@ define void @and_uge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-LABEL: and_uge_ne:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a0, a1
-; RV32I-NEXT:    xor a1, a2, a3
-; RV32I-NEXT:    seqz a1, a1
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    seqz a1, a2
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    bnez a0, .LBB19_2
 ; RV32I-NEXT:  # %bb.1:
@@ -533,8 +533,8 @@ define void @and_uge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-LABEL: and_uge_ne:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sltu a0, a0, a1
-; RV64I-NEXT:    xor a1, a2, a3
-; RV64I-NEXT:    seqz a1, a1
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    seqz a1, a2
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    bnez a0, .LBB19_2
 ; RV64I-NEXT:  # %bb.1:
@@ -558,8 +558,8 @@ define void @and_ule_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-LABEL: and_ule_ne:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a1, a0
-; RV32I-NEXT:    xor a1, a2, a3
-; RV32I-NEXT:    seqz a1, a1
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    seqz a1, a2
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    bnez a0, .LBB20_2
 ; RV32I-NEXT:  # %bb.1:
@@ -570,8 +570,8 @@ define void @and_ule_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-LABEL: and_ule_ne:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sltu a0, a1, a0
-; RV64I-NEXT:    xor a1, a2, a3
-; RV64I-NEXT:    seqz a1, a1
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    seqz a1, a2
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    bnez a0, .LBB20_2
 ; RV64I-NEXT:  # %bb.1:
@@ -595,8 +595,8 @@ define void @or_sge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV32I-LABEL: or_sge_eq:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a0, a1
-; RV32I-NEXT:    xor a1, a2, a3
-; RV32I-NEXT:    snez a1, a1
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    snez a1, a2
 ; RV32I-NEXT:    and a0, a1, a0
 ; RV32I-NEXT:    bnez a0, .LBB21_2
 ; RV32I-NEXT:  # %bb.1:
@@ -607,8 +607,8 @@ define void @or_sge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV64I-LABEL: or_sge_eq:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slt a0, a0, a1
-; RV64I-NEXT:    xor a1, a2, a3
-; RV64I-NEXT:    snez a1, a1
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    snez a1, a2
 ; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    bnez a0, .LBB21_2
 ; RV64I-NEXT:  # %bb.1:
@@ -632,8 +632,8 @@ define void @or_sle_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV32I-LABEL: or_sle_eq:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a1, a0
-; RV32I-NEXT:    xor a1, a2, a3
-; RV32I-NEXT:    snez a1, a1
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    snez a1, a2
 ; RV32I-NEXT:    and a0, a1, a0
 ; RV32I-NEXT:    bnez a0, .LBB22_2
 ; RV32I-NEXT:  # %bb.1:
@@ -644,8 +644,8 @@ define void @or_sle_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV64I-LABEL: or_sle_eq:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slt a0, a1, a0
-; RV64I-NEXT:    xor a1, a2, a3
-; RV64I-NEXT:    snez a1, a1
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    snez a1, a2
 ; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    bnez a0, .LBB22_2
 ; RV64I-NEXT:  # %bb.1:
@@ -669,8 +669,8 @@ define void @or_uge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV32I-LABEL: or_uge_eq:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a0, a1
-; RV32I-NEXT:    xor a1, a2, a3
-; RV32I-NEXT:    snez a1, a1
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    snez a1, a2
 ; RV32I-NEXT:    and a0, a1, a0
 ; RV32I-NEXT:    bnez a0, .LBB23_2
 ; RV32I-NEXT:  # %bb.1:
@@ -681,8 +681,8 @@ define void @or_uge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV64I-LABEL: or_uge_eq:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sltu a0, a0, a1
-; RV64I-NEXT:    xor a1, a2, a3
-; RV64I-NEXT:    snez a1, a1
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    snez a1, a2
 ; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    bnez a0, .LBB23_2
 ; RV64I-NEXT:  # %bb.1:
@@ -706,8 +706,8 @@ define void @or_ule_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV32I-LABEL: or_ule_eq:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a1, a0
-; RV32I-NEXT:    xor a1, a2, a3
-; RV32I-NEXT:    snez a1, a1
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    snez a1, a2
 ; RV32I-NEXT:    and a0, a1, a0
 ; RV32I-NEXT:    bnez a0, .LBB24_2
 ; RV32I-NEXT:  # %bb.1:
@@ -718,8 +718,8 @@ define void @or_ule_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV64I-LABEL: or_ule_eq:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sltu a0, a1, a0
-; RV64I-NEXT:    xor a1, a2, a3
-; RV64I-NEXT:    snez a1, a1
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    snez a1, a2
 ; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    bnez a0, .LBB24_2
 ; RV64I-NEXT:  # %bb.1:
@@ -743,8 +743,8 @@ define void @or_sge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV32I-LABEL: or_sge_ne:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a0, a1
-; RV32I-NEXT:    xor a1, a2, a3
-; RV32I-NEXT:    seqz a1, a1
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    seqz a1, a2
 ; RV32I-NEXT:    and a0, a1, a0
 ; RV32I-NEXT:    bnez a0, .LBB25_2
 ; RV32I-NEXT:  # %bb.1:
@@ -755,8 +755,8 @@ define void @or_sge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV64I-LABEL: or_sge_ne:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slt a0, a0, a1
-; RV64I-NEXT:    xor a1, a2, a3
-; RV64I-NEXT:    seqz a1, a1
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    seqz a1, a2
 ; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    bnez a0, .LBB25_2
 ; RV64I-NEXT:  # %bb.1:
@@ -780,8 +780,8 @@ define void @or_sle_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV32I-LABEL: or_sle_ne:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a1, a0
-; RV32I-NEXT:    xor a1, a2, a3
-; RV32I-NEXT:    seqz a1, a1
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    seqz a1, a2
 ; RV32I-NEXT:    and a0, a1, a0
 ; RV32I-NEXT:    bnez a0, .LBB26_2
 ; RV32I-NEXT:  # %bb.1:
@@ -792,8 +792,8 @@ define void @or_sle_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV64I-LABEL: or_sle_ne:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slt a0, a1, a0
-; RV64I-NEXT:    xor a1, a2, a3
-; RV64I-NEXT:    seqz a1, a1
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    seqz a1, a2
 ; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    bnez a0, .LBB26_2
 ; RV64I-NEXT:  # %bb.1:
@@ -817,8 +817,8 @@ define void @or_uge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV32I-LABEL: or_uge_ne:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a0, a1
-; RV32I-NEXT:    xor a1, a2, a3
-; RV32I-NEXT:    seqz a1, a1
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    seqz a1, a2
 ; RV32I-NEXT:    and a0, a1, a0
 ; RV32I-NEXT:    bnez a0, .LBB27_2
 ; RV32I-NEXT:  # %bb.1:
@@ -829,8 +829,8 @@ define void @or_uge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV64I-LABEL: or_uge_ne:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sltu a0, a0, a1
-; RV64I-NEXT:    xor a1, a2, a3
-; RV64I-NEXT:    seqz a1, a1
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    seqz a1, a2
 ; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    bnez a0, .LBB27_2
 ; RV64I-NEXT:  # %bb.1:
@@ -854,8 +854,8 @@ define void @or_ule_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV32I-LABEL: or_ule_ne:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a1, a0
-; RV32I-NEXT:    xor a1, a2, a3
-; RV32I-NEXT:    seqz a1, a1
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    seqz a1, a2
 ; RV32I-NEXT:    and a0, a1, a0
 ; RV32I-NEXT:    bnez a0, .LBB28_2
 ; RV32I-NEXT:  # %bb.1:
@@ -866,8 +866,8 @@ define void @or_ule_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV64I-LABEL: or_ule_ne:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sltu a0, a1, a0
-; RV64I-NEXT:    xor a1, a2, a3
-; RV64I-NEXT:    seqz a1, a1
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    seqz a1, a2
 ; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    bnez a0, .LBB28_2
 ; RV64I-NEXT:  # %bb.1:

diff  --git a/llvm/test/CodeGen/RISCV/shadowcallstack.ll b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
index b72e994f03c0..50375fbea9dc 100644
--- a/llvm/test/CodeGen/RISCV/shadowcallstack.ll
+++ b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
@@ -121,9 +121,9 @@ define i32 @f4() shadowcallstack {
 ; RV64-NEXT:    call bar@plt
 ; RV64-NEXT:    mv s3, a0
 ; RV64-NEXT:    call bar@plt
-; RV64-NEXT:    addw a1, s0, s1
+; RV64-NEXT:    addw s0, s0, s1
 ; RV64-NEXT:    addw a0, s3, a0
-; RV64-NEXT:    addw a0, a1, a0
+; RV64-NEXT:    addw a0, s0, a0
 ; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload

diff  --git a/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll b/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
index 29d1374b3ea1..4f4f808a9931 100644
--- a/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
+++ b/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
@@ -23,8 +23,8 @@ define iXLen2 @test_udiv_3(iXLen2 %x) nounwind {
 ; RV32-NEXT:    mulhu a6, a5, a4
 ; RV32-NEXT:    add a3, a6, a3
 ; RV32-NEXT:    sltu a0, a0, a2
-; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    mul a1, a0, a4
+; RV32-NEXT:    sub a1, a1, a0
+; RV32-NEXT:    mul a1, a1, a4
 ; RV32-NEXT:    add a1, a3, a1
 ; RV32-NEXT:    mul a0, a5, a4
 ; RV32-NEXT:    ret
@@ -48,8 +48,8 @@ define iXLen2 @test_udiv_3(iXLen2 %x) nounwind {
 ; RV64-NEXT:    mulhu a6, a4, a2
 ; RV64-NEXT:    add a5, a6, a5
 ; RV64-NEXT:    sltu a0, a0, a3
-; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    mul a1, a0, a2
+; RV64-NEXT:    sub a1, a1, a0
+; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, a5, a1
 ; RV64-NEXT:    mul a0, a4, a2
 ; RV64-NEXT:    ret
@@ -76,8 +76,8 @@ define iXLen2 @test_udiv_5(iXLen2 %x) nounwind {
 ; RV32-NEXT:    mulhu a6, a5, a4
 ; RV32-NEXT:    add a3, a6, a3
 ; RV32-NEXT:    sltu a0, a0, a2
-; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    mul a1, a0, a4
+; RV32-NEXT:    sub a1, a1, a0
+; RV32-NEXT:    mul a1, a1, a4
 ; RV32-NEXT:    add a1, a3, a1
 ; RV32-NEXT:    mul a0, a5, a4
 ; RV32-NEXT:    ret
@@ -101,8 +101,8 @@ define iXLen2 @test_udiv_5(iXLen2 %x) nounwind {
 ; RV64-NEXT:    mulhu a6, a4, a2
 ; RV64-NEXT:    add a5, a6, a5
 ; RV64-NEXT:    sltu a0, a0, a3
-; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    mul a1, a0, a2
+; RV64-NEXT:    sub a1, a1, a0
+; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, a5, a1
 ; RV64-NEXT:    mul a0, a4, a2
 ; RV64-NEXT:    ret
@@ -183,8 +183,8 @@ define iXLen2 @test_udiv_15(iXLen2 %x) nounwind {
 ; RV32-NEXT:    mulhu a6, a3, a4
 ; RV32-NEXT:    add a5, a6, a5
 ; RV32-NEXT:    sltu a0, a0, a2
-; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    mul a1, a0, a4
+; RV32-NEXT:    sub a1, a1, a0
+; RV32-NEXT:    mul a1, a1, a4
 ; RV32-NEXT:    add a1, a5, a1
 ; RV32-NEXT:    mul a0, a3, a4
 ; RV32-NEXT:    ret
@@ -210,8 +210,8 @@ define iXLen2 @test_udiv_15(iXLen2 %x) nounwind {
 ; RV64-NEXT:    mulhu a6, a3, a5
 ; RV64-NEXT:    add a4, a6, a4
 ; RV64-NEXT:    sltu a0, a0, a2
-; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    mul a1, a0, a5
+; RV64-NEXT:    sub a1, a1, a0
+; RV64-NEXT:    mul a1, a1, a5
 ; RV64-NEXT:    add a1, a4, a1
 ; RV64-NEXT:    mul a0, a3, a5
 ; RV64-NEXT:    ret
@@ -238,8 +238,8 @@ define iXLen2 @test_udiv_17(iXLen2 %x) nounwind {
 ; RV32-NEXT:    mulhu a6, a5, a4
 ; RV32-NEXT:    add a3, a6, a3
 ; RV32-NEXT:    sltu a0, a0, a2
-; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    mul a1, a0, a4
+; RV32-NEXT:    sub a1, a1, a0
+; RV32-NEXT:    mul a1, a1, a4
 ; RV32-NEXT:    add a1, a3, a1
 ; RV32-NEXT:    mul a0, a5, a4
 ; RV32-NEXT:    ret
@@ -263,8 +263,8 @@ define iXLen2 @test_udiv_17(iXLen2 %x) nounwind {
 ; RV64-NEXT:    mulhu a6, a4, a2
 ; RV64-NEXT:    add a5, a6, a5
 ; RV64-NEXT:    sltu a0, a0, a3
-; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    mul a1, a0, a2
+; RV64-NEXT:    sub a1, a1, a0
+; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, a5, a1
 ; RV64-NEXT:    mul a0, a4, a2
 ; RV64-NEXT:    ret
@@ -293,8 +293,8 @@ define iXLen2 @test_udiv_255(iXLen2 %x) nounwind {
 ; RV32-NEXT:    mulhu a6, a3, a4
 ; RV32-NEXT:    add a5, a6, a5
 ; RV32-NEXT:    sltu a0, a0, a2
-; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    mul a1, a0, a4
+; RV32-NEXT:    sub a1, a1, a0
+; RV32-NEXT:    mul a1, a1, a4
 ; RV32-NEXT:    add a1, a5, a1
 ; RV32-NEXT:    mul a0, a3, a4
 ; RV32-NEXT:    ret
@@ -320,8 +320,8 @@ define iXLen2 @test_udiv_255(iXLen2 %x) nounwind {
 ; RV64-NEXT:    mulhu a6, a3, a5
 ; RV64-NEXT:    add a4, a6, a4
 ; RV64-NEXT:    sltu a0, a0, a2
-; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    mul a1, a0, a5
+; RV64-NEXT:    sub a1, a1, a0
+; RV64-NEXT:    mul a1, a1, a5
 ; RV64-NEXT:    add a1, a4, a1
 ; RV64-NEXT:    mul a0, a3, a5
 ; RV64-NEXT:    ret
@@ -348,8 +348,8 @@ define iXLen2 @test_udiv_257(iXLen2 %x) nounwind {
 ; RV32-NEXT:    mulhu a6, a5, a4
 ; RV32-NEXT:    add a3, a6, a3
 ; RV32-NEXT:    sltu a0, a0, a2
-; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    mul a1, a0, a4
+; RV32-NEXT:    sub a1, a1, a0
+; RV32-NEXT:    mul a1, a1, a4
 ; RV32-NEXT:    add a1, a3, a1
 ; RV32-NEXT:    mul a0, a5, a4
 ; RV32-NEXT:    ret
@@ -373,8 +373,8 @@ define iXLen2 @test_udiv_257(iXLen2 %x) nounwind {
 ; RV64-NEXT:    mulhu a6, a4, a2
 ; RV64-NEXT:    add a5, a6, a5
 ; RV64-NEXT:    sltu a0, a0, a3
-; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    mul a1, a0, a2
+; RV64-NEXT:    sub a1, a1, a0
+; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, a5, a1
 ; RV64-NEXT:    mul a0, a4, a2
 ; RV64-NEXT:    ret
@@ -403,9 +403,9 @@ define iXLen2 @test_udiv_65535(iXLen2 %x) nounwind {
 ; RV32-NEXT:    mulhu a4, a3, a4
 ; RV32-NEXT:    add a4, a4, a5
 ; RV32-NEXT:    sltu a0, a0, a2
-; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    slli a1, a0, 16
-; RV32-NEXT:    add a0, a1, a0
+; RV32-NEXT:    sub a1, a1, a0
+; RV32-NEXT:    slli a0, a1, 16
+; RV32-NEXT:    add a0, a0, a1
 ; RV32-NEXT:    sub a1, a4, a0
 ; RV32-NEXT:    slli a0, a3, 16
 ; RV32-NEXT:    add a0, a0, a3
@@ -435,8 +435,8 @@ define iXLen2 @test_udiv_65535(iXLen2 %x) nounwind {
 ; RV64-NEXT:    mulhu a6, a3, a4
 ; RV64-NEXT:    add a5, a6, a5
 ; RV64-NEXT:    sltu a0, a0, a2
-; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    mul a1, a0, a4
+; RV64-NEXT:    sub a1, a1, a0
+; RV64-NEXT:    mul a1, a1, a4
 ; RV64-NEXT:    add a1, a5, a1
 ; RV64-NEXT:    mul a0, a3, a4
 ; RV64-NEXT:    ret
@@ -462,9 +462,9 @@ define iXLen2 @test_udiv_65537(iXLen2 %x) nounwind {
 ; RV32-NEXT:    slli a5, a3, 16
 ; RV32-NEXT:    sub a4, a4, a5
 ; RV32-NEXT:    sltu a0, a0, a2
-; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    slli a1, a0, 16
-; RV32-NEXT:    sub a1, a0, a1
+; RV32-NEXT:    sub a1, a1, a0
+; RV32-NEXT:    slli a0, a1, 16
+; RV32-NEXT:    sub a1, a1, a0
 ; RV32-NEXT:    add a1, a4, a1
 ; RV32-NEXT:    sub a0, a3, a5
 ; RV32-NEXT:    ret
@@ -490,8 +490,8 @@ define iXLen2 @test_udiv_65537(iXLen2 %x) nounwind {
 ; RV64-NEXT:    mulhu a6, a5, a4
 ; RV64-NEXT:    add a3, a6, a3
 ; RV64-NEXT:    sltu a0, a0, a2
-; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    mul a1, a0, a4
+; RV64-NEXT:    sub a1, a1, a0
+; RV64-NEXT:    mul a1, a1, a4
 ; RV64-NEXT:    add a1, a3, a1
 ; RV64-NEXT:    mul a0, a5, a4
 ; RV64-NEXT:    ret
@@ -522,8 +522,8 @@ define iXLen2 @test_udiv_12(iXLen2 %x) nounwind {
 ; RV32-NEXT:    mulhu a6, a5, a4
 ; RV32-NEXT:    add a3, a6, a3
 ; RV32-NEXT:    sltu a0, a0, a2
-; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    mul a1, a0, a4
+; RV32-NEXT:    sub a1, a1, a0
+; RV32-NEXT:    mul a1, a1, a4
 ; RV32-NEXT:    add a1, a3, a1
 ; RV32-NEXT:    mul a0, a5, a4
 ; RV32-NEXT:    ret
@@ -551,8 +551,8 @@ define iXLen2 @test_udiv_12(iXLen2 %x) nounwind {
 ; RV64-NEXT:    mulhu a6, a4, a2
 ; RV64-NEXT:    add a5, a6, a5
 ; RV64-NEXT:    sltu a0, a0, a3
-; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    mul a1, a0, a2
+; RV64-NEXT:    sub a1, a1, a0
+; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, a5, a1
 ; RV64-NEXT:    mul a0, a4, a2
 ; RV64-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 6495238b4b2f..e1b9670f5e6e 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -674,8 +674,8 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32MV-NEXT:    vslidedown.vi v10, v8, 2
 ; RV32MV-NEXT:    vmv.x.s a1, v10
 ; RV32MV-NEXT:    slli a2, a1, 1
-; RV32MV-NEXT:    sub a0, a2, a0
-; RV32MV-NEXT:    sw a0, 4(s2)
+; RV32MV-NEXT:    sub a2, a2, a0
+; RV32MV-NEXT:    sw a2, 4(s2)
 ; RV32MV-NEXT:    vslidedown.vi v10, v8, 4
 ; RV32MV-NEXT:    vmv.x.s a0, v10
 ; RV32MV-NEXT:    srli a2, a0, 30
@@ -782,8 +782,8 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64MV-NEXT:    slli a2, a2, 2
 ; RV64MV-NEXT:    slli a3, a3, 31
 ; RV64MV-NEXT:    srli a3, a3, 62
-; RV64MV-NEXT:    or a1, a3, a2
-; RV64MV-NEXT:    sw a1, 8(a0)
+; RV64MV-NEXT:    or a2, a3, a2
+; RV64MV-NEXT:    sw a2, 8(a0)
 ; RV64MV-NEXT:    addi sp, s0, -64
 ; RV64MV-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64MV-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload

diff  --git a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
index 367095efdf39..8356029a8ccd 100644
--- a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
@@ -179,16 +179,16 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    ld a6, %lo(.LCPI0_3)(a6)
 ; RV64IM-NEXT:    li a7, 98
 ; RV64IM-NEXT:    mulw a3, a3, a7
-; RV64IM-NEXT:    subw a3, a5, a3
-; RV64IM-NEXT:    mulh a5, a4, a6
-; RV64IM-NEXT:    srli a6, a5, 63
-; RV64IM-NEXT:    srli a5, a5, 7
-; RV64IM-NEXT:    addw a5, a5, a6
+; RV64IM-NEXT:    subw a5, a5, a3
+; RV64IM-NEXT:    mulh a3, a4, a6
+; RV64IM-NEXT:    srli a6, a3, 63
+; RV64IM-NEXT:    srli a3, a3, 7
+; RV64IM-NEXT:    addw a3, a3, a6
 ; RV64IM-NEXT:    li a6, -1003
-; RV64IM-NEXT:    mulw a5, a5, a6
-; RV64IM-NEXT:    subw a4, a4, a5
+; RV64IM-NEXT:    mulw a3, a3, a6
+; RV64IM-NEXT:    subw a4, a4, a3
 ; RV64IM-NEXT:    sh a4, 6(a0)
-; RV64IM-NEXT:    sh a3, 4(a0)
+; RV64IM-NEXT:    sh a5, 4(a0)
 ; RV64IM-NEXT:    sh a1, 2(a0)
 ; RV64IM-NEXT:    sh a2, 0(a0)
 ; RV64IM-NEXT:    ret
@@ -360,8 +360,8 @@ define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a3, a3, 6
 ; RV64IM-NEXT:    addw a3, a3, a6
 ; RV64IM-NEXT:    mulw a3, a3, a7
-; RV64IM-NEXT:    subw a3, a4, a3
-; RV64IM-NEXT:    sh a3, 6(a0)
+; RV64IM-NEXT:    subw a4, a4, a3
+; RV64IM-NEXT:    sh a4, 6(a0)
 ; RV64IM-NEXT:    sh a5, 4(a0)
 ; RV64IM-NEXT:    sh a1, 2(a0)
 ; RV64IM-NEXT:    sh a2, 0(a0)
@@ -749,13 +749,13 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a3, a5, 59
 ; RV64IM-NEXT:    add a3, a5, a3
 ; RV64IM-NEXT:    andi a3, a3, -32
-; RV64IM-NEXT:    subw a3, a5, a3
-; RV64IM-NEXT:    srli a5, a4, 61
-; RV64IM-NEXT:    add a5, a4, a5
-; RV64IM-NEXT:    andi a5, a5, -8
-; RV64IM-NEXT:    subw a4, a4, a5
+; RV64IM-NEXT:    subw a5, a5, a3
+; RV64IM-NEXT:    srli a3, a4, 61
+; RV64IM-NEXT:    add a3, a4, a3
+; RV64IM-NEXT:    andi a3, a3, -8
+; RV64IM-NEXT:    subw a4, a4, a3
 ; RV64IM-NEXT:    sh a4, 4(a0)
-; RV64IM-NEXT:    sh a3, 2(a0)
+; RV64IM-NEXT:    sh a5, 2(a0)
 ; RV64IM-NEXT:    sh a1, 0(a0)
 ; RV64IM-NEXT:    sh a2, 6(a0)
 ; RV64IM-NEXT:    ret
@@ -911,9 +911,9 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    lui a5, 1
 ; RV64IM-NEXT:    addiw a5, a5, 1327
 ; RV64IM-NEXT:    mulw a3, a3, a5
-; RV64IM-NEXT:    subw a3, a4, a3
+; RV64IM-NEXT:    subw a4, a4, a3
 ; RV64IM-NEXT:    sh zero, 0(a0)
-; RV64IM-NEXT:    sh a3, 6(a0)
+; RV64IM-NEXT:    sh a4, 6(a0)
 ; RV64IM-NEXT:    sh a1, 2(a0)
 ; RV64IM-NEXT:    sh a2, 4(a0)
 ; RV64IM-NEXT:    ret
@@ -1055,15 +1055,15 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    lui a5, 1
 ; RV64IM-NEXT:    addiw a5, a5, 1327
 ; RV64IM-NEXT:    mulw a3, a3, a5
-; RV64IM-NEXT:    subw a3, a4, a3
-; RV64IM-NEXT:    srli a4, a1, 49
-; RV64IM-NEXT:    add a4, a1, a4
+; RV64IM-NEXT:    subw a4, a4, a3
+; RV64IM-NEXT:    srli a3, a1, 49
+; RV64IM-NEXT:    add a3, a1, a3
 ; RV64IM-NEXT:    lui a5, 8
-; RV64IM-NEXT:    and a4, a4, a5
-; RV64IM-NEXT:    subw a1, a1, a4
+; RV64IM-NEXT:    and a3, a3, a5
+; RV64IM-NEXT:    subw a1, a1, a3
 ; RV64IM-NEXT:    sh zero, 0(a0)
 ; RV64IM-NEXT:    sh a1, 2(a0)
-; RV64IM-NEXT:    sh a3, 6(a0)
+; RV64IM-NEXT:    sh a4, 6(a0)
 ; RV64IM-NEXT:    sh a2, 4(a0)
 ; RV64IM-NEXT:    ret
   %1 = srem <4 x i16> %x, <i16 1, i16 32768, i16 23, i16 5423>
@@ -1279,9 +1279,9 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV64IM-NEXT:    lui a5, 1
 ; RV64IM-NEXT:    addiw a5, a5, 1327
 ; RV64IM-NEXT:    mul a3, a3, a5
-; RV64IM-NEXT:    sub a3, a4, a3
+; RV64IM-NEXT:    sub a4, a4, a3
 ; RV64IM-NEXT:    sd zero, 0(a0)
-; RV64IM-NEXT:    sd a3, 24(a0)
+; RV64IM-NEXT:    sd a4, 24(a0)
 ; RV64IM-NEXT:    sd a1, 8(a0)
 ; RV64IM-NEXT:    sd a2, 16(a0)
 ; RV64IM-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
index ce45c115b365..f82f320e9d23 100644
--- a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
@@ -132,8 +132,8 @@ define i64 @load_i64(i64* %p) {
 ; RV64I-NEXT:    or a1, a1, a2
 ; RV64I-NEXT:    slli a3, a3, 16
 ; RV64I-NEXT:    slli a4, a4, 24
-; RV64I-NEXT:    or a2, a4, a3
-; RV64I-NEXT:    or a1, a2, a1
+; RV64I-NEXT:    or a3, a4, a3
+; RV64I-NEXT:    or a1, a3, a1
 ; RV64I-NEXT:    lbu a2, 5(a0)
 ; RV64I-NEXT:    lbu a3, 4(a0)
 ; RV64I-NEXT:    lbu a4, 6(a0)

diff  --git a/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll b/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll
index 6a68157ffe85..0772109a5525 100644
--- a/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll
+++ b/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll
@@ -478,18 +478,18 @@ define i32 @in_complex_y0_m0(i32 %x, i32 %y_hi, i32 %y_low, i32 %m_a, i32 %m_b)
 ; CHECK-I-LABEL: in_complex_y0_m0:
 ; CHECK-I:       # %bb.0:
 ; CHECK-I-NEXT:    and a1, a1, a2
-; CHECK-I-NEXT:    xor a2, a3, a4
+; CHECK-I-NEXT:    xor a3, a3, a4
 ; CHECK-I-NEXT:    xor a0, a0, a1
-; CHECK-I-NEXT:    and a0, a0, a2
+; CHECK-I-NEXT:    and a0, a0, a3
 ; CHECK-I-NEXT:    xor a0, a0, a1
 ; CHECK-I-NEXT:    ret
 ;
 ; CHECK-ZBB-LABEL: in_complex_y0_m0:
 ; CHECK-ZBB:       # %bb.0:
 ; CHECK-ZBB-NEXT:    and a1, a1, a2
-; CHECK-ZBB-NEXT:    xor a2, a3, a4
-; CHECK-ZBB-NEXT:    andn a1, a1, a2
-; CHECK-ZBB-NEXT:    and a0, a0, a2
+; CHECK-ZBB-NEXT:    xor a3, a3, a4
+; CHECK-ZBB-NEXT:    andn a1, a1, a3
+; CHECK-ZBB-NEXT:    and a0, a0, a3
 ; CHECK-ZBB-NEXT:    or a0, a0, a1
 ; CHECK-ZBB-NEXT:    ret
   %y = and i32 %y_hi, %y_low
@@ -504,18 +504,18 @@ define i32 @in_complex_y1_m0(i32 %x, i32 %y_hi, i32 %y_low, i32 %m_a, i32 %m_b)
 ; CHECK-I-LABEL: in_complex_y1_m0:
 ; CHECK-I:       # %bb.0:
 ; CHECK-I-NEXT:    and a1, a1, a2
-; CHECK-I-NEXT:    xor a2, a3, a4
+; CHECK-I-NEXT:    xor a3, a3, a4
 ; CHECK-I-NEXT:    xor a0, a0, a1
-; CHECK-I-NEXT:    and a0, a0, a2
+; CHECK-I-NEXT:    and a0, a0, a3
 ; CHECK-I-NEXT:    xor a0, a1, a0
 ; CHECK-I-NEXT:    ret
 ;
 ; CHECK-ZBB-LABEL: in_complex_y1_m0:
 ; CHECK-ZBB:       # %bb.0:
 ; CHECK-ZBB-NEXT:    and a1, a1, a2
-; CHECK-ZBB-NEXT:    xor a2, a3, a4
-; CHECK-ZBB-NEXT:    andn a1, a1, a2
-; CHECK-ZBB-NEXT:    and a0, a0, a2
+; CHECK-ZBB-NEXT:    xor a3, a3, a4
+; CHECK-ZBB-NEXT:    andn a1, a1, a3
+; CHECK-ZBB-NEXT:    and a0, a0, a3
 ; CHECK-ZBB-NEXT:    or a0, a0, a1
 ; CHECK-ZBB-NEXT:    ret
   %y = and i32 %y_hi, %y_low
@@ -530,18 +530,18 @@ define i32 @in_complex_y0_m1(i32 %x, i32 %y_hi, i32 %y_low, i32 %m_a, i32 %m_b)
 ; CHECK-I-LABEL: in_complex_y0_m1:
 ; CHECK-I:       # %bb.0:
 ; CHECK-I-NEXT:    and a1, a1, a2
-; CHECK-I-NEXT:    xor a2, a3, a4
+; CHECK-I-NEXT:    xor a3, a3, a4
 ; CHECK-I-NEXT:    xor a0, a0, a1
-; CHECK-I-NEXT:    and a0, a2, a0
+; CHECK-I-NEXT:    and a0, a3, a0
 ; CHECK-I-NEXT:    xor a0, a0, a1
 ; CHECK-I-NEXT:    ret
 ;
 ; CHECK-ZBB-LABEL: in_complex_y0_m1:
 ; CHECK-ZBB:       # %bb.0:
 ; CHECK-ZBB-NEXT:    and a1, a1, a2
-; CHECK-ZBB-NEXT:    xor a2, a3, a4
-; CHECK-ZBB-NEXT:    andn a1, a1, a2
-; CHECK-ZBB-NEXT:    and a0, a0, a2
+; CHECK-ZBB-NEXT:    xor a3, a3, a4
+; CHECK-ZBB-NEXT:    andn a1, a1, a3
+; CHECK-ZBB-NEXT:    and a0, a0, a3
 ; CHECK-ZBB-NEXT:    or a0, a0, a1
 ; CHECK-ZBB-NEXT:    ret
   %y = and i32 %y_hi, %y_low
@@ -556,18 +556,18 @@ define i32 @in_complex_y1_m1(i32 %x, i32 %y_hi, i32 %y_low, i32 %m_a, i32 %m_b)
 ; CHECK-I-LABEL: in_complex_y1_m1:
 ; CHECK-I:       # %bb.0:
 ; CHECK-I-NEXT:    and a1, a1, a2
-; CHECK-I-NEXT:    xor a2, a3, a4
+; CHECK-I-NEXT:    xor a3, a3, a4
 ; CHECK-I-NEXT:    xor a0, a0, a1
-; CHECK-I-NEXT:    and a0, a2, a0
+; CHECK-I-NEXT:    and a0, a3, a0
 ; CHECK-I-NEXT:    xor a0, a1, a0
 ; CHECK-I-NEXT:    ret
 ;
 ; CHECK-ZBB-LABEL: in_complex_y1_m1:
 ; CHECK-ZBB:       # %bb.0:
 ; CHECK-ZBB-NEXT:    and a1, a1, a2
-; CHECK-ZBB-NEXT:    xor a2, a3, a4
-; CHECK-ZBB-NEXT:    andn a1, a1, a2
-; CHECK-ZBB-NEXT:    and a0, a0, a2
+; CHECK-ZBB-NEXT:    xor a3, a3, a4
+; CHECK-ZBB-NEXT:    andn a1, a1, a3
+; CHECK-ZBB-NEXT:    and a0, a0, a3
 ; CHECK-ZBB-NEXT:    or a0, a0, a1
 ; CHECK-ZBB-NEXT:    ret
   %y = and i32 %y_hi, %y_low
@@ -780,8 +780,8 @@ define i32 @out_constant_mone_vary_invmask(i32 %x, i32 %y, i32 %mask) {
 ;
 ; CHECK-ZBB-LABEL: out_constant_mone_vary_invmask:
 ; CHECK-ZBB:       # %bb.0:
-; CHECK-ZBB-NEXT:    and a0, a2, a1
-; CHECK-ZBB-NEXT:    orn a0, a0, a2
+; CHECK-ZBB-NEXT:    and a1, a2, a1
+; CHECK-ZBB-NEXT:    orn a0, a1, a2
 ; CHECK-ZBB-NEXT:    ret
   %notmask = xor i32 %mask, -1
   %mx = and i32 %notmask, -1
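
The logical-op hunks in this file follow the same encoding rule:
C.AND, C.OR, and C.XOR are likewise destructive two-operand
instructions restricted to x8-x15, so xor a2, a3, a4 cannot be
compressed while xor a3, a3, a4 can. The Zbb ANDN/ORN in the
CHECK-ZBB bodies have no 16-bit counterparts in the base C extension,
so only the plain AND/OR/XOR results stand to benefit from landing in
their first source register. A short sketch (hypothetical, assuming
-march=rv64imc_zbb):

  xor  a3, a3, a4   # rd == rs1: compressible to c.xor a3, a4
  and  a1, a1, a3   # rd == rs1: compressible to c.and a1, a3
  andn a1, a1, a3   # Zbb-only; no compressed encoding exists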

diff  --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
index b5b1901d778b..948858f95c5d 100644
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -333,8 +333,8 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV32-NEXT:    lb a0, 4(a0)
 ; RV32-NEXT:    lw a1, 0(s0)
 ; RV32-NEXT:    slli a0, a0, 10
-; RV32-NEXT:    srli a2, a1, 22
-; RV32-NEXT:    or s1, a2, a0
+; RV32-NEXT:    srli s1, a1, 22
+; RV32-NEXT:    or s1, s1, a0
 ; RV32-NEXT:    srli s2, a1, 11
 ; RV32-NEXT:    andi a0, a1, 2047
 ; RV32-NEXT:    li a1, 683

diff  --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
index 0113bd5d0d26..c10e2bc84b74 100644
--- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
@@ -171,14 +171,14 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    ld a6, %lo(.LCPI0_3)(a6)
 ; RV64IM-NEXT:    li a7, 98
 ; RV64IM-NEXT:    mulw a3, a3, a7
-; RV64IM-NEXT:    subw a3, a5, a3
-; RV64IM-NEXT:    mulhu a5, a4, a6
-; RV64IM-NEXT:    srli a5, a5, 7
+; RV64IM-NEXT:    subw a5, a5, a3
+; RV64IM-NEXT:    mulhu a3, a4, a6
+; RV64IM-NEXT:    srli a3, a3, 7
 ; RV64IM-NEXT:    li a6, 1003
-; RV64IM-NEXT:    mulw a5, a5, a6
-; RV64IM-NEXT:    subw a4, a4, a5
+; RV64IM-NEXT:    mulw a3, a3, a6
+; RV64IM-NEXT:    subw a4, a4, a3
 ; RV64IM-NEXT:    sh a4, 6(a0)
-; RV64IM-NEXT:    sh a3, 4(a0)
+; RV64IM-NEXT:    sh a5, 4(a0)
 ; RV64IM-NEXT:    sh a1, 2(a0)
 ; RV64IM-NEXT:    sh a2, 0(a0)
 ; RV64IM-NEXT:    ret
@@ -350,8 +350,8 @@ define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    add a3, a6, a3
 ; RV64IM-NEXT:    srli a3, a3, 6
 ; RV64IM-NEXT:    mulw a3, a3, a7
-; RV64IM-NEXT:    subw a3, a4, a3
-; RV64IM-NEXT:    sh a3, 6(a0)
+; RV64IM-NEXT:    subw a4, a4, a3
+; RV64IM-NEXT:    sh a4, 6(a0)
 ; RV64IM-NEXT:    sh a5, 4(a0)
 ; RV64IM-NEXT:    sh a1, 2(a0)
 ; RV64IM-NEXT:    sh a2, 0(a0)
@@ -857,9 +857,9 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    lui a5, 1
 ; RV64IM-NEXT:    addiw a5, a5, 1327
 ; RV64IM-NEXT:    mulw a3, a3, a5
-; RV64IM-NEXT:    subw a3, a4, a3
+; RV64IM-NEXT:    subw a4, a4, a3
 ; RV64IM-NEXT:    sh zero, 0(a0)
-; RV64IM-NEXT:    sh a3, 6(a0)
+; RV64IM-NEXT:    sh a4, 6(a0)
 ; RV64IM-NEXT:    sh a1, 2(a0)
 ; RV64IM-NEXT:    sh a2, 4(a0)
 ; RV64IM-NEXT:    ret
@@ -1082,9 +1082,9 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV64IM-NEXT:    lui a5, 1
 ; RV64IM-NEXT:    addiw a5, a5, 1327
 ; RV64IM-NEXT:    mul a3, a3, a5
-; RV64IM-NEXT:    sub a3, a4, a3
+; RV64IM-NEXT:    sub a4, a4, a3
 ; RV64IM-NEXT:    sd zero, 0(a0)
-; RV64IM-NEXT:    sd a3, 24(a0)
+; RV64IM-NEXT:    sd a4, 24(a0)
 ; RV64IM-NEXT:    sd a1, 8(a0)
 ; RV64IM-NEXT:    sd a2, 16(a0)
 ; RV64IM-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
index bf9552cb591d..3a54f992eac1 100644
--- a/llvm/test/CodeGen/RISCV/xaluo.ll
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -1957,10 +1957,10 @@ define i64 @ssubo.select.i64(i64 %v1, i64 %v2) {
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    sltu a4, a0, a2
 ; RV32-NEXT:    sub a5, a1, a3
-; RV32-NEXT:    sub a4, a5, a4
-; RV32-NEXT:    xor a4, a1, a4
-; RV32-NEXT:    xor a5, a1, a3
-; RV32-NEXT:    and a4, a5, a4
+; RV32-NEXT:    sub a5, a5, a4
+; RV32-NEXT:    xor a5, a1, a5
+; RV32-NEXT:    xor a4, a1, a3
+; RV32-NEXT:    and a4, a4, a5
 ; RV32-NEXT:    bltz a4, .LBB38_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a2
@@ -1983,10 +1983,10 @@ define i64 @ssubo.select.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA:       # %bb.0: # %entry
 ; RV32ZBA-NEXT:    sltu a4, a0, a2
 ; RV32ZBA-NEXT:    sub a5, a1, a3
-; RV32ZBA-NEXT:    sub a4, a5, a4
-; RV32ZBA-NEXT:    xor a4, a1, a4
-; RV32ZBA-NEXT:    xor a5, a1, a3
-; RV32ZBA-NEXT:    and a4, a5, a4
+; RV32ZBA-NEXT:    sub a5, a5, a4
+; RV32ZBA-NEXT:    xor a5, a1, a5
+; RV32ZBA-NEXT:    xor a4, a1, a3
+; RV32ZBA-NEXT:    and a4, a4, a5
 ; RV32ZBA-NEXT:    bltz a4, .LBB38_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
 ; RV32ZBA-NEXT:    mv a0, a2
@@ -2016,11 +2016,11 @@ define i1 @ssub.not.i64(i64 %v1, i64 %v2) {
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    sltu a0, a0, a2
 ; RV32-NEXT:    sub a2, a1, a3
-; RV32-NEXT:    sub a0, a2, a0
-; RV32-NEXT:    xor a0, a1, a0
+; RV32-NEXT:    sub a2, a2, a0
+; RV32-NEXT:    xor a2, a1, a2
 ; RV32-NEXT:    xor a1, a1, a3
-; RV32-NEXT:    and a0, a1, a0
-; RV32-NEXT:    slti a0, a0, 0
+; RV32-NEXT:    and a1, a1, a2
+; RV32-NEXT:    slti a0, a1, 0
 ; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:    ret
 ;
@@ -2037,11 +2037,11 @@ define i1 @ssub.not.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA:       # %bb.0: # %entry
 ; RV32ZBA-NEXT:    sltu a0, a0, a2
 ; RV32ZBA-NEXT:    sub a2, a1, a3
-; RV32ZBA-NEXT:    sub a0, a2, a0
-; RV32ZBA-NEXT:    xor a0, a1, a0
+; RV32ZBA-NEXT:    sub a2, a2, a0
+; RV32ZBA-NEXT:    xor a2, a1, a2
 ; RV32ZBA-NEXT:    xor a1, a1, a3
-; RV32ZBA-NEXT:    and a0, a1, a0
-; RV32ZBA-NEXT:    slti a0, a0, 0
+; RV32ZBA-NEXT:    and a1, a1, a2
+; RV32ZBA-NEXT:    slti a0, a1, 0
 ; RV32ZBA-NEXT:    xori a0, a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
@@ -3186,11 +3186,11 @@ define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    sltu a0, a0, a2
 ; RV32-NEXT:    sub a2, a1, a3
-; RV32-NEXT:    sub a0, a2, a0
-; RV32-NEXT:    xor a0, a1, a0
+; RV32-NEXT:    sub a2, a2, a0
+; RV32-NEXT:    xor a2, a1, a2
 ; RV32-NEXT:    xor a1, a1, a3
-; RV32-NEXT:    and a0, a1, a0
-; RV32-NEXT:    bgez a0, .LBB57_2
+; RV32-NEXT:    and a1, a1, a2
+; RV32-NEXT:    bgez a1, .LBB57_2
 ; RV32-NEXT:  # %bb.1: # %overflow
 ; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
@@ -3215,11 +3215,11 @@ define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA:       # %bb.0: # %entry
 ; RV32ZBA-NEXT:    sltu a0, a0, a2
 ; RV32ZBA-NEXT:    sub a2, a1, a3
-; RV32ZBA-NEXT:    sub a0, a2, a0
-; RV32ZBA-NEXT:    xor a0, a1, a0
+; RV32ZBA-NEXT:    sub a2, a2, a0
+; RV32ZBA-NEXT:    xor a2, a1, a2
 ; RV32ZBA-NEXT:    xor a1, a1, a3
-; RV32ZBA-NEXT:    and a0, a1, a0
-; RV32ZBA-NEXT:    bgez a0, .LBB57_2
+; RV32ZBA-NEXT:    and a1, a1, a2
+; RV32ZBA-NEXT:    bgez a1, .LBB57_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
 ; RV32ZBA-NEXT:    li a0, 0
 ; RV32ZBA-NEXT:    ret


        

