[llvm] a8c7912 - [RISCV] Teach getRegAllocationHints about compressible SRAI/SRLI.

Craig Topper via llvm-commits llvm-commits@lists.llvm.org
Wed Nov 30 10:44:12 PST 2022


Author: Craig Topper
Date: 2022-11-30T10:28:57-08:00
New Revision: a8c79121bf4da9155a2d8597c1fdef2796efc55b

URL: https://github.com/llvm/llvm-project/commit/a8c79121bf4da9155a2d8597c1fdef2796efc55b
DIFF: https://github.com/llvm/llvm-project/commit/a8c79121bf4da9155a2d8597c1fdef2796efc55b.diff

LOG: [RISCV] Teach getRegAllocationHints about compressible SRAI/SRLI.

Similar to previous patches for ADDI/ADDIW/SLLI/ADD, but restricted to cases
where the register is in x8-x15 (the GPRC register class).
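
As a rough sketch of the constraint (the register choices here are only
illustrative): c.srli/c.srai can only encode rd == rs1 with the register in
x8-x15, so the hint only pays off when the shift's source and result can share
such a register, e.g.

    srli a0, a0, 32    # a0 is x10 and rd == rs1, so this can become c.srli
    srli a6, a6, 32    # a6 is x16, outside x8-x15, so this stays 32-bit
    srli a0, a1, 32    # rd != rs1, so this stays 32-bit as well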

I've restricted it so that we can be precise about whether the resulting
instruction would be compressible. Changing the register allocation may make
some other instruction non-compressible, so we should try to be accurate.
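
For example, the copysign-casts.ll hunk below shows the intended effect: the
hint steers the SRLI result into the register already holding its source, so

    srli a0, a1, 16        # rd != rs1, needs the 32-bit encoding
    fmv.h.x ft0, a0

becomes

    srli a1, a1, 16        # rd == rs1 and a1 is x11 (in GPRC), so c.srli applies
    fmv.h.x ft0, a1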

Reviewed By: asb

Differential Revision: https://reviews.llvm.org/D138740

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
    llvm/test/CodeGen/RISCV/aext-to-sext.ll
    llvm/test/CodeGen/RISCV/atomic-signext.ll
    llvm/test/CodeGen/RISCV/calling-conv-half.ll
    llvm/test/CodeGen/RISCV/copysign-casts.ll
    llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
    llvm/test/CodeGen/RISCV/div-pow2.ll
    llvm/test/CodeGen/RISCV/float-arith.ll
    llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
    llvm/test/CodeGen/RISCV/half-arith.ll
    llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
    llvm/test/CodeGen/RISCV/rv32zbb.ll
    llvm/test/CodeGen/RISCV/rv32zbkb.ll
    llvm/test/CodeGen/RISCV/rv64zbkb.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
    llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
    llvm/test/CodeGen/RISCV/select-constant-xor.ll
    llvm/test/CodeGen/RISCV/select.ll
    llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
    llvm/test/CodeGen/RISCV/xaluo.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 1cc94e23fe10..f05985326863 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -426,12 +426,12 @@ bool RISCVRegisterInfo::getRegAllocationHints(
   // Add any two address hints after any copy hints.
   SmallSet<unsigned, 4> TwoAddrHints;
 
-  auto tryAddHint = [&](const MachineOperand &VRRegMO,
-                        const MachineOperand &MO) -> void {
+  auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
+                        bool NeedGPRC) -> void {
     Register Reg = MO.getReg();
     Register PhysReg =
         Register::isPhysicalRegister(Reg) ? Reg : Register(VRM->getPhys(Reg));
-    if (PhysReg) {
+    if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg))) {
       assert(!MO.getSubReg() && !VRRegMO.getSubReg() && "Unexpected subreg!");
       if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
         TwoAddrHints.insert(PhysReg);
@@ -441,10 +441,15 @@ bool RISCVRegisterInfo::getRegAllocationHints(
   // For now we support the compressible instructions which can encode all
   // registers and have a single register source.
   // TODO: Add more compressed instructions.
-  auto isCompressible = [](const MachineInstr &MI) {
+  auto isCompressible = [](const MachineInstr &MI, bool &NeedGPRC) {
+    NeedGPRC = false;
     switch (MI.getOpcode()) {
     default:
       return false;
+    case RISCV::SRAI:
+    case RISCV::SRLI:
+      NeedGPRC = true;
+      return true;
     case RISCV::ADD:
     case RISCV::SLLI:
       return true;
@@ -456,16 +461,17 @@ bool RISCVRegisterInfo::getRegAllocationHints(
 
   for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
     const MachineInstr &MI = *MO.getParent();
-    if (isCompressible(MI)) {
+    bool NeedGPRC;
+    if (isCompressible(MI, NeedGPRC)) {
       unsigned OpIdx = MI.getOperandNo(&MO);
       if (OpIdx == 0 && MI.getOperand(1).isReg()) {
-        tryAddHint(MO, MI.getOperand(1));
+        tryAddHint(MO, MI.getOperand(1), NeedGPRC);
         if (MI.isCommutable() && MI.getOperand(2).isReg())
-          tryAddHint(MO, MI.getOperand(2));
+          tryAddHint(MO, MI.getOperand(2), NeedGPRC);
       } else if (OpIdx == 1) {
-        tryAddHint(MO, MI.getOperand(0));
+        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
       } else if (MI.isCommutable() && OpIdx == 2) {
-        tryAddHint(MO, MI.getOperand(0));
+        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
       }
     }
   }

diff --git a/llvm/test/CodeGen/RISCV/aext-to-sext.ll b/llvm/test/CodeGen/RISCV/aext-to-sext.ll
index fb09b7cc21fd..806c495fa677 100644
--- a/llvm/test/CodeGen/RISCV/aext-to-sext.ll
+++ b/llvm/test/CodeGen/RISCV/aext-to-sext.ll
@@ -82,8 +82,8 @@ define i64 @sext_phi_constants(i32 signext %c) {
 ; RV64I-NEXT:  # %bb.1: # %iffalse
 ; RV64I-NEXT:    li a1, -2
 ; RV64I-NEXT:  .LBB2_2: # %merge
-; RV64I-NEXT:    slli a1, a1, 32
-; RV64I-NEXT:    srli a0, a1, 32
+; RV64I-NEXT:    slli a0, a1, 32
+; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
   %a = icmp ne i32 %c, 0
   br i1 %a, label %iftrue, label %iffalse

diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll
index 16dea0e7accb..b4da0d62b3d7 100644
--- a/llvm/test/CodeGen/RISCV/atomic-signext.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll
@@ -609,8 +609,8 @@ define signext i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB10_1
 ; RV32I-NEXT:  .LBB10_4: # %atomicrmw.end
-; RV32I-NEXT:    slli a3, a3, 24
-; RV32I-NEXT:    srai a0, a3, 24
+; RV32I-NEXT:    slli a0, a3, 24
+; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -683,8 +683,8 @@ define signext i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB10_1
 ; RV64I-NEXT:  .LBB10_4: # %atomicrmw.end
-; RV64I-NEXT:    slli a3, a3, 56
-; RV64I-NEXT:    srai a0, a3, 56
+; RV64I-NEXT:    slli a0, a3, 56
+; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -761,8 +761,8 @@ define signext i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB11_1
 ; RV32I-NEXT:  .LBB11_4: # %atomicrmw.end
-; RV32I-NEXT:    slli a3, a3, 24
-; RV32I-NEXT:    srai a0, a3, 24
+; RV32I-NEXT:    slli a0, a3, 24
+; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -835,8 +835,8 @@ define signext i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB11_1
 ; RV64I-NEXT:  .LBB11_4: # %atomicrmw.end
-; RV64I-NEXT:    slli a3, a3, 56
-; RV64I-NEXT:    srai a0, a3, 56
+; RV64I-NEXT:    slli a0, a3, 56
+; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -911,8 +911,8 @@ define signext i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB12_1
 ; RV32I-NEXT:  .LBB12_4: # %atomicrmw.end
-; RV32I-NEXT:    slli a3, a3, 24
-; RV32I-NEXT:    srai a0, a3, 24
+; RV32I-NEXT:    slli a0, a3, 24
+; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -978,8 +978,8 @@ define signext i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB12_1
 ; RV64I-NEXT:  .LBB12_4: # %atomicrmw.end
-; RV64I-NEXT:    slli a3, a3, 56
-; RV64I-NEXT:    srai a0, a3, 56
+; RV64I-NEXT:    slli a0, a3, 56
+; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -1049,8 +1049,8 @@ define signext i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB13_1
 ; RV32I-NEXT:  .LBB13_4: # %atomicrmw.end
-; RV32I-NEXT:    slli a3, a3, 24
-; RV32I-NEXT:    srai a0, a3, 24
+; RV32I-NEXT:    slli a0, a3, 24
+; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1116,8 +1116,8 @@ define signext i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB13_1
 ; RV64I-NEXT:  .LBB13_4: # %atomicrmw.end
-; RV64I-NEXT:    slli a3, a3, 56
-; RV64I-NEXT:    srai a0, a3, 56
+; RV64I-NEXT:    slli a0, a3, 56
+; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -1657,8 +1657,8 @@ define signext i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB21_1
 ; RV32I-NEXT:  .LBB21_4: # %atomicrmw.end
-; RV32I-NEXT:    slli a3, a3, 16
-; RV32I-NEXT:    srai a0, a3, 16
+; RV32I-NEXT:    slli a0, a3, 16
+; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1733,8 +1733,8 @@ define signext i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB21_1
 ; RV64I-NEXT:  .LBB21_4: # %atomicrmw.end
-; RV64I-NEXT:    slli a3, a3, 48
-; RV64I-NEXT:    srai a0, a3, 48
+; RV64I-NEXT:    slli a0, a3, 48
+; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -1813,8 +1813,8 @@ define signext i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB22_1
 ; RV32I-NEXT:  .LBB22_4: # %atomicrmw.end
-; RV32I-NEXT:    slli a3, a3, 16
-; RV32I-NEXT:    srai a0, a3, 16
+; RV32I-NEXT:    slli a0, a3, 16
+; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1889,8 +1889,8 @@ define signext i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB22_1
 ; RV64I-NEXT:  .LBB22_4: # %atomicrmw.end
-; RV64I-NEXT:    slli a3, a3, 48
-; RV64I-NEXT:    srai a0, a3, 48
+; RV64I-NEXT:    slli a0, a3, 48
+; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -1970,8 +1970,8 @@ define signext i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a2, s0
 ; RV32I-NEXT:    j .LBB23_1
 ; RV32I-NEXT:  .LBB23_4: # %atomicrmw.end
-; RV32I-NEXT:    slli a1, a1, 16
-; RV32I-NEXT:    srai a0, a1, 16
+; RV32I-NEXT:    slli a0, a1, 16
+; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -2042,8 +2042,8 @@ define signext i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a2, s0
 ; RV64I-NEXT:    j .LBB23_1
 ; RV64I-NEXT:  .LBB23_4: # %atomicrmw.end
-; RV64I-NEXT:    slli a1, a1, 48
-; RV64I-NEXT:    srai a0, a1, 48
+; RV64I-NEXT:    slli a0, a1, 48
+; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -2118,8 +2118,8 @@ define signext i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a2, s0
 ; RV32I-NEXT:    j .LBB24_1
 ; RV32I-NEXT:  .LBB24_4: # %atomicrmw.end
-; RV32I-NEXT:    slli a1, a1, 16
-; RV32I-NEXT:    srai a0, a1, 16
+; RV32I-NEXT:    slli a0, a1, 16
+; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -2190,8 +2190,8 @@ define signext i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a2, s0
 ; RV64I-NEXT:    j .LBB24_1
 ; RV64I-NEXT:  .LBB24_4: # %atomicrmw.end
-; RV64I-NEXT:    slli a1, a1, 48
-; RV64I-NEXT:    srai a0, a1, 48
+; RV64I-NEXT:    slli a0, a1, 48
+; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
index a684a14616ee..d14877a8aa95 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
@@ -19,8 +19,8 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    slli a1, a1, 16
-; RV32I-NEXT:    srli a0, a1, 16
+; RV32I-NEXT:    slli a0, a1, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __extendhfsf2@plt
 ; RV32I-NEXT:    call __fixsfsi@plt
 ; RV32I-NEXT:    add a0, s0, a0
@@ -35,8 +35,8 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    slli a1, a1, 48
-; RV64I-NEXT:    srli a0, a1, 48
+; RV64I-NEXT:    slli a0, a1, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __extendhfsf2@plt
 ; RV64I-NEXT:    call __fixsfdi@plt
 ; RV64I-NEXT:    addw a0, s0, a0

diff --git a/llvm/test/CodeGen/RISCV/copysign-casts.ll b/llvm/test/CodeGen/RISCV/copysign-casts.ll
index 8d4417843780..6021a0ba1b7a 100644
--- a/llvm/test/CodeGen/RISCV/copysign-casts.ll
+++ b/llvm/test/CodeGen/RISCV/copysign-casts.ll
@@ -485,8 +485,8 @@ define half @fold_demote_h_d(half %a, double %b) nounwind {
 ;
 ; RV32IFZFH-LABEL: fold_demote_h_d:
 ; RV32IFZFH:       # %bb.0:
-; RV32IFZFH-NEXT:    srli a0, a1, 16
-; RV32IFZFH-NEXT:    fmv.h.x ft0, a0
+; RV32IFZFH-NEXT:    srli a1, a1, 16
+; RV32IFZFH-NEXT:    fmv.h.x ft0, a1
 ; RV32IFZFH-NEXT:    fsgnj.h fa0, fa0, ft0
 ; RV32IFZFH-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
index 03125de6b74e..a3538476694a 100644
--- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
@@ -297,9 +297,9 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    li a0, 32
 ; RV32I-NEXT:    beqz s0, .LBB3_4
 ; RV32I-NEXT:  .LBB3_2:
-; RV32I-NEXT:    srli a0, s1, 27
-; RV32I-NEXT:    add a0, s4, a0
-; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    srli s1, s1, 27
+; RV32I-NEXT:    add s1, s4, s1
+; RV32I-NEXT:    lbu a0, 0(s1)
 ; RV32I-NEXT:    j .LBB3_5
 ; RV32I-NEXT:  .LBB3_3:
 ; RV32I-NEXT:    srli a0, a0, 27
@@ -635,9 +635,9 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
 ; RV32I-NEXT:    addi a0, a0, 32
 ; RV32I-NEXT:    j .LBB7_3
 ; RV32I-NEXT:  .LBB7_2:
-; RV32I-NEXT:    srli a0, s0, 27
-; RV32I-NEXT:    add a0, s4, a0
-; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    srli s0, s0, 27
+; RV32I-NEXT:    add s0, s4, s0
+; RV32I-NEXT:    lbu a0, 0(s0)
 ; RV32I-NEXT:  .LBB7_3:
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/div-pow2.ll b/llvm/test/CodeGen/RISCV/div-pow2.ll
index 72054661026f..736645baecea 100644
--- a/llvm/test/CodeGen/RISCV/div-pow2.ll
+++ b/llvm/test/CodeGen/RISCV/div-pow2.ll
@@ -408,9 +408,9 @@ define i64 @sdiv64_pow2_8589934592(i64 %a) {
 ; RV32I-NEXT:    srai a1, a1, 31
 ; RV32I-NEXT:    add a1, a0, a1
 ; RV32I-NEXT:    sltu a0, a1, a0
-; RV32I-NEXT:    add a2, a2, a0
-; RV32I-NEXT:    srai a0, a2, 1
-; RV32I-NEXT:    srai a1, a2, 31
+; RV32I-NEXT:    add a1, a2, a0
+; RV32I-NEXT:    srai a0, a1, 1
+; RV32I-NEXT:    srai a1, a1, 31
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: sdiv64_pow2_8589934592:

diff --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll
index a6026a6b7079..4655d46686f3 100644
--- a/llvm/test/CodeGen/RISCV/float-arith.ll
+++ b/llvm/test/CodeGen/RISCV/float-arith.ll
@@ -239,8 +239,8 @@ define float @fsgnjn_s(float %a, float %b) nounwind {
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    slli s0, s0, 1
-; RV32I-NEXT:    srli a1, s0, 1
-; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli s0, s0, 1
+; RV32I-NEXT:    or a0, s0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -257,8 +257,8 @@ define float @fsgnjn_s(float %a, float %b) nounwind {
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    slli s0, s0, 33
-; RV64I-NEXT:    srli a1, s0, 33
-; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli s0, s0, 33
+; RV64I-NEXT:    or a0, s0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16

diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
index 5e9d031f1b00..fc28e156801d 100644
--- a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
@@ -60,8 +60,8 @@ define <2 x i32> @utest_f64i32(<2 x double> %x) {
 ; CHECK-NOV-LABEL: utest_f64i32:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.lu.d a0, fa0, rtz
-; CHECK-NOV-NEXT:    li a1, -1
-; CHECK-NOV-NEXT:    srli a2, a1, 32
+; CHECK-NOV-NEXT:    li a2, -1
+; CHECK-NOV-NEXT:    srli a2, a2, 32
 ; CHECK-NOV-NEXT:    fcvt.lu.d a1, fa1, rtz
 ; CHECK-NOV-NEXT:    bgeu a0, a2, .LBB1_3
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -97,8 +97,8 @@ define <2 x i32> @ustest_f64i32(<2 x double> %x) {
 ; CHECK-NOV-LABEL: ustest_f64i32:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.l.d a0, fa0, rtz
-; CHECK-NOV-NEXT:    li a1, -1
-; CHECK-NOV-NEXT:    srli a2, a1, 32
+; CHECK-NOV-NEXT:    li a2, -1
+; CHECK-NOV-NEXT:    srli a2, a2, 32
 ; CHECK-NOV-NEXT:    fcvt.l.d a1, fa1, rtz
 ; CHECK-NOV-NEXT:    blt a0, a2, .LBB2_2
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -220,8 +220,8 @@ define <4 x i32> @utest_f32i32(<4 x float> %x) {
 ; CHECK-NOV-LABEL: utest_f32i32:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.lu.s a1, fa0, rtz
-; CHECK-NOV-NEXT:    li a2, -1
-; CHECK-NOV-NEXT:    srli a3, a2, 32
+; CHECK-NOV-NEXT:    li a3, -1
+; CHECK-NOV-NEXT:    srli a3, a3, 32
 ; CHECK-NOV-NEXT:    fcvt.lu.s a2, fa1, rtz
 ; CHECK-NOV-NEXT:    bgeu a1, a3, .LBB4_6
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -276,8 +276,8 @@ define <4 x i32> @ustest_f32i32(<4 x float> %x) {
 ; CHECK-NOV-LABEL: ustest_f32i32:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.l.s a1, fa0, rtz
-; CHECK-NOV-NEXT:    li a2, -1
-; CHECK-NOV-NEXT:    srli a4, a2, 32
+; CHECK-NOV-NEXT:    li a4, -1
+; CHECK-NOV-NEXT:    srli a4, a4, 32
 ; CHECK-NOV-NEXT:    fcvt.l.s a2, fa1, rtz
 ; CHECK-NOV-NEXT:    bge a1, a4, .LBB5_6
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -701,8 +701,8 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    mv a0, s1
 ; CHECK-NOV-NEXT:    call __extendhfsf2@plt
 ; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-NOV-NEXT:    li a1, -1
-; CHECK-NOV-NEXT:    srli a2, a1, 32
+; CHECK-NOV-NEXT:    li a2, -1
+; CHECK-NOV-NEXT:    srli a2, a2, 32
 ; CHECK-NOV-NEXT:    bge a0, a2, .LBB8_6
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
 ; CHECK-NOV-NEXT:    fcvt.l.s a1, fs1, rtz
@@ -3355,8 +3355,8 @@ define <2 x i32> @utest_f64i32_mm(<2 x double> %x) {
 ; CHECK-NOV-LABEL: utest_f64i32_mm:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.lu.d a0, fa0, rtz
-; CHECK-NOV-NEXT:    li a1, -1
-; CHECK-NOV-NEXT:    srli a2, a1, 32
+; CHECK-NOV-NEXT:    li a2, -1
+; CHECK-NOV-NEXT:    srli a2, a2, 32
 ; CHECK-NOV-NEXT:    fcvt.lu.d a1, fa1, rtz
 ; CHECK-NOV-NEXT:    bgeu a0, a2, .LBB28_3
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -3391,8 +3391,8 @@ define <2 x i32> @ustest_f64i32_mm(<2 x double> %x) {
 ; CHECK-NOV-LABEL: ustest_f64i32_mm:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.l.d a1, fa1, rtz
-; CHECK-NOV-NEXT:    li a0, -1
-; CHECK-NOV-NEXT:    srli a2, a0, 32
+; CHECK-NOV-NEXT:    li a2, -1
+; CHECK-NOV-NEXT:    srli a2, a2, 32
 ; CHECK-NOV-NEXT:    fcvt.l.d a0, fa0, rtz
 ; CHECK-NOV-NEXT:    blt a1, a2, .LBB29_2
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -3510,8 +3510,8 @@ define <4 x i32> @utest_f32i32_mm(<4 x float> %x) {
 ; CHECK-NOV-LABEL: utest_f32i32_mm:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.lu.s a1, fa0, rtz
-; CHECK-NOV-NEXT:    li a2, -1
-; CHECK-NOV-NEXT:    srli a3, a2, 32
+; CHECK-NOV-NEXT:    li a3, -1
+; CHECK-NOV-NEXT:    srli a3, a3, 32
 ; CHECK-NOV-NEXT:    fcvt.lu.s a2, fa1, rtz
 ; CHECK-NOV-NEXT:    bgeu a1, a3, .LBB31_6
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -3565,8 +3565,8 @@ define <4 x i32> @ustest_f32i32_mm(<4 x float> %x) {
 ; CHECK-NOV-LABEL: ustest_f32i32_mm:
 ; CHECK-NOV:       # %bb.0: # %entry
 ; CHECK-NOV-NEXT:    fcvt.l.s a1, fa3, rtz
-; CHECK-NOV-NEXT:    li a2, -1
-; CHECK-NOV-NEXT:    srli a4, a2, 32
+; CHECK-NOV-NEXT:    li a4, -1
+; CHECK-NOV-NEXT:    srli a4, a4, 32
 ; CHECK-NOV-NEXT:    fcvt.l.s a2, fa2, rtz
 ; CHECK-NOV-NEXT:    bge a1, a4, .LBB32_6
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -3985,8 +3985,8 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    mv a0, s1
 ; CHECK-NOV-NEXT:    call __extendhfsf2@plt
 ; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-NOV-NEXT:    li a1, -1
-; CHECK-NOV-NEXT:    srli a2, a1, 32
+; CHECK-NOV-NEXT:    li a2, -1
+; CHECK-NOV-NEXT:    srli a2, a2, 32
 ; CHECK-NOV-NEXT:    bge a0, a2, .LBB35_6
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
 ; CHECK-NOV-NEXT:    fcvt.l.s a1, fs1, rtz

diff --git a/llvm/test/CodeGen/RISCV/half-arith.ll b/llvm/test/CodeGen/RISCV/half-arith.ll
index 53612aae60f4..ac9ff9c97786 100644
--- a/llvm/test/CodeGen/RISCV/half-arith.ll
+++ b/llvm/test/CodeGen/RISCV/half-arith.ll
@@ -439,8 +439,8 @@ define half @fsgnjn_s(half %a, half %b) nounwind {
 ; RV32I-NEXT:    lui a1, 1048568
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    slli s1, s1, 17
-; RV32I-NEXT:    srli a1, s1, 17
-; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli s1, s1, 17
+; RV32I-NEXT:    or a0, s1, a0
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -478,8 +478,8 @@ define half @fsgnjn_s(half %a, half %b) nounwind {
 ; RV64I-NEXT:    lui a1, 1048568
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    slli s1, s1, 49
-; RV64I-NEXT:    srli a1, s1, 49
-; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli s1, s1, 49
+; RV64I-NEXT:    or a0, s1, a0
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
index 16324c6a2c11..2671d5023b4f 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
@@ -138,8 +138,8 @@ declare i64 @llvm.fshl.i64(i64, i64, i64)
 define i64 @rol_i64(i64 %a, i64 %b) nounwind {
 ; CHECK-LABEL: rol_i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    slli a3, a2, 26
-; CHECK-NEXT:    srli a5, a3, 31
+; CHECK-NEXT:    slli a5, a2, 26
+; CHECK-NEXT:    srli a5, a5, 31
 ; CHECK-NEXT:    mv a4, a1
 ; CHECK-NEXT:    bnez a5, .LBB7_2
 ; CHECK-NEXT:  # %bb.1:
@@ -155,8 +155,8 @@ define i64 @rol_i64(i64 %a, i64 %b) nounwind {
 ; CHECK-NEXT:    srl a1, a1, a5
 ; CHECK-NEXT:    or a3, a3, a1
 ; CHECK-NEXT:    sll a0, a0, a2
-; CHECK-NEXT:    srli a1, a4, 1
-; CHECK-NEXT:    srl a1, a1, a5
+; CHECK-NEXT:    srli a4, a4, 1
+; CHECK-NEXT:    srl a1, a4, a5
 ; CHECK-NEXT:    or a1, a0, a1
 ; CHECK-NEXT:    mv a0, a3
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
index 6d10db62d17b..55d571d25ab4 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -232,9 +232,9 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    li a0, 32
 ; RV32I-NEXT:    beqz s0, .LBB3_4
 ; RV32I-NEXT:  .LBB3_2:
-; RV32I-NEXT:    srli a0, s1, 27
-; RV32I-NEXT:    add a0, s4, a0
-; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    srli s1, s1, 27
+; RV32I-NEXT:    add s1, s4, s1
+; RV32I-NEXT:    lbu a0, 0(s1)
 ; RV32I-NEXT:    j .LBB3_5
 ; RV32I-NEXT:  .LBB3_3:
 ; RV32I-NEXT:    srli a0, a0, 27

diff --git a/llvm/test/CodeGen/RISCV/rv32zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbkb.ll
index a4dbbde0aa2f..9a434e2e6860 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbkb.ll
@@ -150,8 +150,8 @@ define i64 @packh_i64(i64 %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 255
 ; RV32I-NEXT:    slli a2, a2, 24
-; RV32I-NEXT:    srli a1, a2, 16
-; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a2, a2, 16
+; RV32I-NEXT:    or a0, a2, a0
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
index 5dca3f7a9f0f..7ac8b567d0c6 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
@@ -253,8 +253,8 @@ define i64 @pack_i64_allWUsers(i32 signext %0, i32 signext %1, i32 signext %2) {
 ; RV64I-NEXT:    addw a0, a1, a0
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    slli a2, a2, 32
-; RV64I-NEXT:    srli a1, a2, 32
-; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    srli a2, a2, 32
+; RV64I-NEXT:    or a0, a0, a2
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBKB-LABEL: pack_i64_allWUsers:

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
index 28d29e820b91..89419747c97d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -109,10 +109,10 @@ define void @insertelt_v32i16(<32 x i16>* %x, i16 %y, i32 %idx) {
 ; RV64-NEXT:    vle16.v v8, (a0)
 ; RV64-NEXT:    vmv.s.x v12, a1
 ; RV64-NEXT:    slli a2, a2, 32
-; RV64-NEXT:    srli a1, a2, 32
-; RV64-NEXT:    addi a2, a1, 1
-; RV64-NEXT:    vsetvli zero, a2, e16, m4, tu, ma
-; RV64-NEXT:    vslideup.vx v8, v12, a1
+; RV64-NEXT:    srli a2, a2, 32
+; RV64-NEXT:    addi a1, a2, 1
+; RV64-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
+; RV64-NEXT:    vslideup.vx v8, v12, a2
 ; RV64-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
 ; RV64-NEXT:    vse16.v v8, (a0)
 ; RV64-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
index 0e40c22e00cf..5aa2380a6070 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
@@ -660,10 +660,10 @@ define <vscale x 1 x i64> @insertelt_nxv1i64_idx(<vscale x 1 x i64> %v, i64 %elt
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    slli a1, a1, 32
-; CHECK-NEXT:    srli a0, a1, 32
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
+; CHECK-NEXT:    srli a1, a1, 32
+; CHECK-NEXT:    addi a0, a1, 1
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 %idx
   ret <vscale x 1 x i64> %r
@@ -697,10 +697,10 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_idx(<vscale x 2 x i64> %v, i64 %elt
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    slli a1, a1, 32
-; CHECK-NEXT:    srli a0, a1, 32
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
+; CHECK-NEXT:    srli a1, a1, 32
+; CHECK-NEXT:    addi a0, a1, 1
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vslideup.vx v8, v10, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 %idx
   ret <vscale x 2 x i64> %r
@@ -734,10 +734,10 @@ define <vscale x 4 x i64> @insertelt_nxv4i64_idx(<vscale x 4 x i64> %v, i64 %elt
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    slli a1, a1, 32
-; CHECK-NEXT:    srli a0, a1, 32
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
+; CHECK-NEXT:    srli a1, a1, 32
+; CHECK-NEXT:    addi a0, a1, 1
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vslideup.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 %idx
   ret <vscale x 4 x i64> %r
@@ -771,10 +771,10 @@ define <vscale x 8 x i64> @insertelt_nxv8i64_idx(<vscale x 8 x i64> %v, i64 %elt
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    slli a1, a1, 32
-; CHECK-NEXT:    srli a0, a1, 32
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
+; CHECK-NEXT:    srli a1, a1, 32
+; CHECK-NEXT:    addi a0, a1, 1
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vslideup.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 %idx
   ret <vscale x 8 x i64> %r

diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
index 9803ab2089ed..5ce39e501f8a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
@@ -763,9 +763,9 @@ define void @strided_store_nxv17f64(<vscale x 17 x double> %v, double* %ptr, i32
 ; CHECK-RV32-NEXT:  # %bb.5:
 ; CHECK-RV32-NEXT:    mv a0, a4
 ; CHECK-RV32-NEXT:  .LBB36_6:
-; CHECK-RV32-NEXT:    srli a3, a4, 2
-; CHECK-RV32-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-RV32-NEXT:    vslidedown.vx v0, v24, a3
+; CHECK-RV32-NEXT:    srli a4, a4, 2
+; CHECK-RV32-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
+; CHECK-RV32-NEXT:    vslidedown.vx v0, v24, a4
 ; CHECK-RV32-NEXT:    mul a3, a5, a2
 ; CHECK-RV32-NEXT:    add a1, a1, a3
 ; CHECK-RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -823,9 +823,9 @@ define void @strided_store_nxv17f64(<vscale x 17 x double> %v, double* %ptr, i32
 ; CHECK-RV64-NEXT:  # %bb.5:
 ; CHECK-RV64-NEXT:    mv a0, a4
 ; CHECK-RV64-NEXT:  .LBB36_6:
-; CHECK-RV64-NEXT:    srli a3, a4, 2
-; CHECK-RV64-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-RV64-NEXT:    vslidedown.vx v0, v24, a3
+; CHECK-RV64-NEXT:    srli a4, a4, 2
+; CHECK-RV64-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
+; CHECK-RV64-NEXT:    vslidedown.vx v0, v24, a4
 ; CHECK-RV64-NEXT:    mul a3, a5, a2
 ; CHECK-RV64-NEXT:    add a1, a1, a3
 ; CHECK-RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/select-constant-xor.ll b/llvm/test/CodeGen/RISCV/select-constant-xor.ll
index 61dfef8f55ee..02e6dd4b1bdc 100644
--- a/llvm/test/CodeGen/RISCV/select-constant-xor.ll
+++ b/llvm/test/CodeGen/RISCV/select-constant-xor.ll
@@ -5,10 +5,10 @@
 define i32 @xori64i32(i64 %a) {
 ; RV32-LABEL: xori64i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    srai a0, a1, 31
-; RV32-NEXT:    lui a1, 524288
-; RV32-NEXT:    addi a1, a1, -1
-; RV32-NEXT:    xor a0, a0, a1
+; RV32-NEXT:    srai a1, a1, 31
+; RV32-NEXT:    lui a0, 524288
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    xor a0, a1, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: xori64i32:

diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll
index 46afdeaa251f..0cca251ca7b6 100644
--- a/llvm/test/CodeGen/RISCV/select.ll
+++ b/llvm/test/CodeGen/RISCV/select.ll
@@ -397,11 +397,11 @@ define i32 @select_udiv_3(i1 zeroext %cond, i32 %a) {
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    bnez a0, .LBB19_2
 ; RV32-NEXT:  # %bb.1: # %entry
-; RV32-NEXT:    srli a0, a1, 1
-; RV32-NEXT:    lui a1, 199729
-; RV32-NEXT:    addi a1, a1, -975
-; RV32-NEXT:    mulhu a0, a0, a1
-; RV32-NEXT:    srli a1, a0, 2
+; RV32-NEXT:    srli a1, a1, 1
+; RV32-NEXT:    lui a0, 199729
+; RV32-NEXT:    addi a0, a0, -975
+; RV32-NEXT:    mulhu a1, a1, a0
+; RV32-NEXT:    srli a1, a1, 2
 ; RV32-NEXT:  .LBB19_2: # %entry
 ; RV32-NEXT:    mv a0, a1
 ; RV32-NEXT:    ret
@@ -413,8 +413,8 @@ define i32 @select_udiv_3(i1 zeroext %cond, i32 %a) {
 ; RV64-NEXT:    srliw a0, a1, 1
 ; RV64-NEXT:    lui a1, 199729
 ; RV64-NEXT:    addiw a1, a1, -975
-; RV64-NEXT:    mul a0, a0, a1
-; RV64-NEXT:    srli a1, a0, 34
+; RV64-NEXT:    mul a1, a0, a1
+; RV64-NEXT:    srli a1, a1, 34
 ; RV64-NEXT:  .LBB19_2: # %entry
 ; RV64-NEXT:    mv a0, a1
 ; RV64-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index b2aa4f7d97e3..56f0fc2eaea2 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -326,8 +326,8 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32-NEXT:    or a0, a0, a1
 ; RV32-NEXT:    lw s4, 0(s0)
 ; RV32-NEXT:    slli a2, a2, 30
-; RV32-NEXT:    srli a1, a2, 31
-; RV32-NEXT:    neg a1, a1
+; RV32-NEXT:    srli a2, a2, 31
+; RV32-NEXT:    neg a1, a2
 ; RV32-NEXT:    li a2, 7
 ; RV32-NEXT:    li a3, 0
 ; RV32-NEXT:    call __moddi3@plt
@@ -476,8 +476,8 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32M-NEXT:    or a0, a0, a1
 ; RV32M-NEXT:    lw s4, 0(s0)
 ; RV32M-NEXT:    slli a2, a2, 30
-; RV32M-NEXT:    srli a1, a2, 31
-; RV32M-NEXT:    neg a1, a1
+; RV32M-NEXT:    srli a2, a2, 31
+; RV32M-NEXT:    neg a1, a2
 ; RV32M-NEXT:    li a2, 7
 ; RV32M-NEXT:    li a3, 0
 ; RV32M-NEXT:    call __moddi3@plt
@@ -591,8 +591,8 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64M-NEXT:    or a1, a3, a1
 ; RV64M-NEXT:    sw a1, 8(a0)
 ; RV64M-NEXT:    slli a2, a2, 29
-; RV64M-NEXT:    srli a1, a2, 61
-; RV64M-NEXT:    sb a1, 12(a0)
+; RV64M-NEXT:    srli a2, a2, 61
+; RV64M-NEXT:    sb a2, 12(a0)
 ; RV64M-NEXT:    ret
 ;
 ; RV32MV-LABEL: test_srem_vec:
@@ -781,8 +781,8 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64MV-NEXT:    sd a1, 0(a0)
 ; RV64MV-NEXT:    slli a2, a2, 2
 ; RV64MV-NEXT:    slli a3, a3, 31
-; RV64MV-NEXT:    srli a1, a3, 62
-; RV64MV-NEXT:    or a1, a1, a2
+; RV64MV-NEXT:    srli a3, a3, 62
+; RV64MV-NEXT:    or a1, a3, a2
 ; RV64MV-NEXT:    sw a1, 8(a0)
 ; RV64MV-NEXT:    addi sp, s0, -64
 ; RV64MV-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
index b0641c73f0d9..bf9552cb591d 100644
--- a/llvm/test/CodeGen/RISCV/xaluo.ll
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -2537,9 +2537,9 @@ define i1 @smulo.not.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    add a0, a0, a1
 ; RV32-NEXT:    add a0, a5, a0
 ; RV32-NEXT:    add a0, a0, s0
-; RV32-NEXT:    srai a1, a4, 31
-; RV32-NEXT:    xor a0, a0, a1
-; RV32-NEXT:    xor a1, t6, a1
+; RV32-NEXT:    srai a4, a4, 31
+; RV32-NEXT:    xor a0, a0, a4
+; RV32-NEXT:    xor a1, t6, a4
 ; RV32-NEXT:    or a0, a1, a0
 ; RV32-NEXT:    seqz a0, a0
 ; RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
@@ -2600,9 +2600,9 @@ define i1 @smulo.not.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    add a0, a0, a1
 ; RV32ZBA-NEXT:    add a0, a5, a0
 ; RV32ZBA-NEXT:    add a0, a0, s0
-; RV32ZBA-NEXT:    srai a1, a4, 31
-; RV32ZBA-NEXT:    xor a0, a0, a1
-; RV32ZBA-NEXT:    xor a1, t6, a1
+; RV32ZBA-NEXT:    srai a4, a4, 31
+; RV32ZBA-NEXT:    xor a0, a0, a4
+; RV32ZBA-NEXT:    xor a1, t6, a4
 ; RV32ZBA-NEXT:    or a0, a1, a0
 ; RV32ZBA-NEXT:    seqz a0, a0
 ; RV32ZBA-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
@@ -3493,9 +3493,9 @@ define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    add a0, a0, a1
 ; RV32-NEXT:    add a0, a5, a0
 ; RV32-NEXT:    add a0, a0, s0
-; RV32-NEXT:    srai a1, a4, 31
-; RV32-NEXT:    xor a0, a0, a1
-; RV32-NEXT:    xor a1, t6, a1
+; RV32-NEXT:    srai a4, a4, 31
+; RV32-NEXT:    xor a0, a0, a4
+; RV32-NEXT:    xor a1, t6, a4
 ; RV32-NEXT:    or a0, a1, a0
 ; RV32-NEXT:    beqz a0, .LBB61_2
 ; RV32-NEXT:  # %bb.1: # %overflow
@@ -3566,9 +3566,9 @@ define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    add a0, a0, a1
 ; RV32ZBA-NEXT:    add a0, a5, a0
 ; RV32ZBA-NEXT:    add a0, a0, s0
-; RV32ZBA-NEXT:    srai a1, a4, 31
-; RV32ZBA-NEXT:    xor a0, a0, a1
-; RV32ZBA-NEXT:    xor a1, t6, a1
+; RV32ZBA-NEXT:    srai a4, a4, 31
+; RV32ZBA-NEXT:    xor a0, a0, a4
+; RV32ZBA-NEXT:    xor a1, t6, a4
 ; RV32ZBA-NEXT:    or a0, a1, a0
 ; RV32ZBA-NEXT:    beqz a0, .LBB61_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
@@ -3643,9 +3643,9 @@ define zeroext i1 @smulo2.br.i64(i64 %v1) {
 ; RV32-NEXT:    add a0, a0, a5
 ; RV32-NEXT:    add a0, a4, a0
 ; RV32-NEXT:    add a0, a0, t5
-; RV32-NEXT:    srai a1, a3, 31
-; RV32-NEXT:    xor a0, a0, a1
-; RV32-NEXT:    xor a1, t4, a1
+; RV32-NEXT:    srai a3, a3, 31
+; RV32-NEXT:    xor a0, a0, a3
+; RV32-NEXT:    xor a1, t4, a3
 ; RV32-NEXT:    or a0, a1, a0
 ; RV32-NEXT:    beqz a0, .LBB62_2
 ; RV32-NEXT:  # %bb.1: # %overflow
@@ -3705,9 +3705,9 @@ define zeroext i1 @smulo2.br.i64(i64 %v1) {
 ; RV32ZBA-NEXT:    add a0, a0, a5
 ; RV32ZBA-NEXT:    add a0, a4, a0
 ; RV32ZBA-NEXT:    add a0, a0, t5
-; RV32ZBA-NEXT:    srai a1, a3, 31
-; RV32ZBA-NEXT:    xor a0, a0, a1
-; RV32ZBA-NEXT:    xor a1, t4, a1
+; RV32ZBA-NEXT:    srai a3, a3, 31
+; RV32ZBA-NEXT:    xor a0, a0, a3
+; RV32ZBA-NEXT:    xor a1, t4, a3
 ; RV32ZBA-NEXT:    or a0, a1, a0
 ; RV32ZBA-NEXT:    beqz a0, .LBB62_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow


        

