[llvm] 64612f5 - [RISCV] Add ADD to getRegAllocationHints to improve use of c.add.

Craig Topper via llvm-commits <llvm-commits@lists.llvm.org>
Fri Nov 25 09:00:20 PST 2022


Author: Craig Topper
Date: 2022-11-25T08:59:27-08:00
New Revision: 64612f5d8e35a47008554e646e5b434c4bf738ae

URL: https://github.com/llvm/llvm-project/commit/64612f5d8e35a47008554e646e5b434c4bf738ae
DIFF: https://github.com/llvm/llvm-project/commit/64612f5d8e35a47008554e646e5b434c4bf738ae.diff

LOG: [RISCV] Add ADD to getRegAllocationHints to improve use of c.add.

add can always be compressed to c.add if one of the source registers is the
same as the destination.

The same is not true for c.addw, where the registers need to be in x8-x15.
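
For illustration, a minimal sketch of the size difference (hypothetical
register choices; a5 is x15, a7 is x17):

    add  a4, a5, a7    # rd matches neither source: stays a 4-byte add
    add  a5, a5, a7    # rd == rs1: compressible to the 2-byte c.add a5, a7
    addw a5, a5, a7    # not compressible: c.addw needs both registers in
                       # x8-x15, and a7 (x17) is out of range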

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
    llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
    llvm/test/CodeGen/RISCV/div-by-constant.ll
    llvm/test/CodeGen/RISCV/div-pow2.ll
    llvm/test/CodeGen/RISCV/div.ll
    llvm/test/CodeGen/RISCV/mul.ll
    llvm/test/CodeGen/RISCV/rotl-rotr.ll
    llvm/test/CodeGen/RISCV/rv32zba.ll
    llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
    llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
    llvm/test/CodeGen/RISCV/select-binop-identity.ll
    llvm/test/CodeGen/RISCV/shadowcallstack.ll
    llvm/test/CodeGen/RISCV/split-offsets.ll
    llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
    llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
    llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
    llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll
    llvm/test/CodeGen/RISCV/urem-vector-lkk.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 7c42eb1ea221..57370eb2c377 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -419,6 +419,7 @@ bool RISCVRegisterInfo::getRegAllocationHints(
     switch (MI.getOpcode()) {
     default:
       return false;
+    case RISCV::ADD:
     case RISCV::SLLI:
       return true;
     case RISCV::ADDI:
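
The change itself is a single new case: returning true from this switch tells
the hint logic that tying the destination of an ADD to one of its source
registers is profitable. As a rough, standalone illustration of the idea (a
toy model with assumed names, not LLVM's actual getRegAllocationHints
plumbing):

    #include <cstdio>
    #include <cstring>
    #include <vector>

    // Toy model: a two-source instruction whose sources already live in
    // physical registers and whose destination is still unallocated.
    struct TwoSrcInstr {
      const char *Opcode;   // e.g. "add"
      const char *PhysSrc1; // physical register assigned to source 1
      const char *PhysSrc2; // physical register assigned to source 2
    };

    // For a plain add, reusing either source register as the destination
    // makes the instruction compressible to c.add, so hint both registers.
    std::vector<const char *> regAllocationHints(const TwoSrcInstr &MI) {
      std::vector<const char *> Hints;
      if (std::strcmp(MI.Opcode, "add") == 0) {
        Hints.push_back(MI.PhysSrc1);
        Hints.push_back(MI.PhysSrc2);
      }
      return Hints;
    }

    int main() {
      TwoSrcInstr MI{"add", "a5", "a7"};
      for (const char *Hint : regAllocationHints(MI))
        std::printf("hint destination toward %s\n", Hint);
    }

The test diffs below show the effect: the allocator now picks a destination
equal to one of the add's sources (for example, add a5, a5, a7 instead of
add a4, a5, a7) without changing instruction counts.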

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
index f5d316e68474..8d38a2e749af 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
@@ -335,8 +335,8 @@ define i64 @callee_aligned_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i128 %f
 ; RV64I-NEXT:    ld a1, 0(sp)
 ; RV64I-NEXT:    ld a2, 16(sp)
 ; RV64I-NEXT:    ld a3, 32(sp)
-; RV64I-NEXT:    add a4, a5, a7
-; RV64I-NEXT:    add a1, a4, a1
+; RV64I-NEXT:    add a5, a5, a7
+; RV64I-NEXT:    add a1, a5, a1
 ; RV64I-NEXT:    add a1, a1, a2
 ; RV64I-NEXT:    add a1, a1, a3
 ; RV64I-NEXT:    add a0, a1, a0

diff --git a/llvm/test/CodeGen/RISCV/div-by-constant.ll b/llvm/test/CodeGen/RISCV/div-by-constant.ll
index d25a47b23d55..9198cf478f3a 100644
--- a/llvm/test/CodeGen/RISCV/div-by-constant.ll
+++ b/llvm/test/CodeGen/RISCV/div-by-constant.ll
@@ -83,8 +83,8 @@ define i64 @udiv64_constant_no_add(i64 %a) nounwind {
 ; RV32-NEXT:    add a3, a6, a3
 ; RV32-NEXT:    sltu a0, a0, a2
 ; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    mul a0, a0, a4
-; RV32-NEXT:    add a1, a3, a0
+; RV32-NEXT:    mul a1, a0, a4
+; RV32-NEXT:    add a1, a3, a1
 ; RV32-NEXT:    mul a0, a5, a4
 ; RV32-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/div-pow2.ll b/llvm/test/CodeGen/RISCV/div-pow2.ll
index 736645baecea..72054661026f 100644
--- a/llvm/test/CodeGen/RISCV/div-pow2.ll
+++ b/llvm/test/CodeGen/RISCV/div-pow2.ll
@@ -408,9 +408,9 @@ define i64 @sdiv64_pow2_8589934592(i64 %a) {
 ; RV32I-NEXT:    srai a1, a1, 31
 ; RV32I-NEXT:    add a1, a0, a1
 ; RV32I-NEXT:    sltu a0, a1, a0
-; RV32I-NEXT:    add a1, a2, a0
-; RV32I-NEXT:    srai a0, a1, 1
-; RV32I-NEXT:    srai a1, a1, 31
+; RV32I-NEXT:    add a2, a2, a0
+; RV32I-NEXT:    srai a0, a2, 1
+; RV32I-NEXT:    srai a1, a2, 31
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: sdiv64_pow2_8589934592:

diff --git a/llvm/test/CodeGen/RISCV/div.ll b/llvm/test/CodeGen/RISCV/div.ll
index 6f614569353a..cd7fc00fbb36 100644
--- a/llvm/test/CodeGen/RISCV/div.ll
+++ b/llvm/test/CodeGen/RISCV/div.ll
@@ -198,8 +198,8 @@ define i64 @udiv64_constant(i64 %a) nounwind {
 ; RV32IM-NEXT:    add a3, a6, a3
 ; RV32IM-NEXT:    sltu a0, a0, a2
 ; RV32IM-NEXT:    sub a0, a1, a0
-; RV32IM-NEXT:    mul a0, a0, a4
-; RV32IM-NEXT:    add a1, a3, a0
+; RV32IM-NEXT:    mul a1, a0, a4
+; RV32IM-NEXT:    add a1, a3, a1
 ; RV32IM-NEXT:    mul a0, a5, a4
 ; RV32IM-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
index e30e5e69b846..fbaa0040c3f6 100644
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -1412,8 +1412,8 @@ define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __muldi3@plt
-; RV32I-NEXT:    add a2, a0, s5
-; RV32I-NEXT:    sltu a0, a2, a0
+; RV32I-NEXT:    add s5, a0, s5
+; RV32I-NEXT:    sltu a0, s5, a0
 ; RV32I-NEXT:    add a0, a1, a0
 ; RV32I-NEXT:    add s8, s7, a0
 ; RV32I-NEXT:    mv a0, s0
@@ -1436,18 +1436,18 @@ define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
 ; RV32I-NEXT:    call __muldi3@plt
-; RV32I-NEXT:    add a3, a0, s2
-; RV32I-NEXT:    add a2, s9, a3
-; RV32I-NEXT:    sltu a4, a2, s9
-; RV32I-NEXT:    sltu a5, s9, s5
-; RV32I-NEXT:    sltu a6, s8, s7
-; RV32I-NEXT:    add a6, s6, a6
-; RV32I-NEXT:    add a5, a6, a5
+; RV32I-NEXT:    add s2, a0, s2
+; RV32I-NEXT:    add a2, s9, s2
+; RV32I-NEXT:    sltu a3, a2, s9
+; RV32I-NEXT:    sltu a4, s9, s5
+; RV32I-NEXT:    sltu a5, s8, s7
+; RV32I-NEXT:    add a5, s6, a5
+; RV32I-NEXT:    add a4, a5, a4
 ; RV32I-NEXT:    add a1, a1, s3
-; RV32I-NEXT:    sltu a0, a3, a0
+; RV32I-NEXT:    sltu a0, s2, a0
 ; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    add a0, a5, a0
-; RV32I-NEXT:    add a1, a0, a4
+; RV32I-NEXT:    add a0, a4, a0
+; RV32I-NEXT:    add a1, a0, a3
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
index 3992992bc9ba..9b13bd394feb 100644
--- a/llvm/test/CodeGen/RISCV/rotl-rotr.ll
+++ b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
@@ -1076,8 +1076,8 @@ define signext i64 @rotl_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign
 ; RV32I-NEXT:    and a0, a0, a2
 ; RV32I-NEXT:    add a0, a1, a0
 ; RV32I-NEXT:    sltu a1, a0, a1
-; RV32I-NEXT:    add a2, a5, a3
-; RV32I-NEXT:    add a1, a2, a1
+; RV32I-NEXT:    add a3, a5, a3
+; RV32I-NEXT:    add a1, a3, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: rotl_64_mask_shared:
@@ -1131,8 +1131,8 @@ define signext i64 @rotl_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign
 ; RV32ZBB-NEXT:    and a0, a0, a2
 ; RV32ZBB-NEXT:    add a0, a1, a0
 ; RV32ZBB-NEXT:    sltu a1, a0, a1
-; RV32ZBB-NEXT:    add a2, a5, a3
-; RV32ZBB-NEXT:    add a1, a2, a1
+; RV32ZBB-NEXT:    add a3, a5, a3
+; RV32ZBB-NEXT:    add a1, a3, a1
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: rotl_64_mask_shared:
@@ -1549,10 +1549,10 @@ define i64 @rotr_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
 ; RV32I-NEXT:    slli a6, a6, 1
 ; RV32I-NEXT:    sll a0, a6, a0
 ; RV32I-NEXT:    or a0, a0, a3
-; RV32I-NEXT:    add a3, a7, a0
+; RV32I-NEXT:    add a7, a7, a0
 ; RV32I-NEXT:    add a0, a1, a2
 ; RV32I-NEXT:    sltu a1, a0, a1
-; RV32I-NEXT:    add a1, a3, a1
+; RV32I-NEXT:    add a1, a7, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: rotr_64_mask_multiple:
@@ -1605,10 +1605,10 @@ define i64 @rotr_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
 ; RV32ZBB-NEXT:    slli a6, a6, 1
 ; RV32ZBB-NEXT:    sll a0, a6, a0
 ; RV32ZBB-NEXT:    or a0, a0, a3
-; RV32ZBB-NEXT:    add a3, a7, a0
+; RV32ZBB-NEXT:    add a7, a7, a0
 ; RV32ZBB-NEXT:    add a0, a1, a2
 ; RV32ZBB-NEXT:    sltu a1, a0, a1
-; RV32ZBB-NEXT:    add a1, a3, a1
+; RV32ZBB-NEXT:    add a1, a7, a1
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: rotr_64_mask_multiple:

diff --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll
index f39fa6a625b8..cc93117b3cb8 100644
--- a/llvm/test/CodeGen/RISCV/rv32zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -44,9 +44,9 @@ define i64 @sh3add(i64 %0, i64* %1) {
 ; RV32I-LABEL: sh3add:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a0, a0, 3
-; RV32I-NEXT:    add a1, a2, a0
-; RV32I-NEXT:    lw a0, 0(a1)
-; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    add a2, a2, a0
+; RV32I-NEXT:    lw a0, 0(a2)
+; RV32I-NEXT:    lw a1, 4(a2)
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: sh3add:

diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
index 9bc394d24849..bede78459b7b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -82,10 +82,10 @@ define fastcc <vscale x 64 x i32> @ret_split_nxv64i32(<vscale x 64 x i32>* %x) {
 ; CHECK-NEXT:    vl8re32.v v24, (a1)
 ; CHECK-NEXT:    vl8re32.v v0, (a5)
 ; CHECK-NEXT:    vs8r.v v16, (a0)
-; CHECK-NEXT:    add a1, a0, a2
-; CHECK-NEXT:    vs8r.v v24, (a1)
-; CHECK-NEXT:    add a1, a0, a4
-; CHECK-NEXT:    vs8r.v v0, (a1)
+; CHECK-NEXT:    add a2, a0, a2
+; CHECK-NEXT:    vs8r.v v24, (a2)
+; CHECK-NEXT:    add a4, a0, a4
+; CHECK-NEXT:    vs8r.v v0, (a4)
 ; CHECK-NEXT:    add a0, a0, a3
 ; CHECK-NEXT:    vs8r.v v8, (a0)
 ; CHECK-NEXT:    ret
@@ -148,30 +148,30 @@ define fastcc <vscale x 128 x i32> @ret_split_nxv128i32(<vscale x 128 x i32>* %x
 ; CHECK-NEXT:    vl8re32.v v16, (t3)
 ; CHECK-NEXT:    vl8re32.v v24, (t2)
 ; CHECK-NEXT:    vs8r.v v8, (a0)
-; CHECK-NEXT:    add a1, a0, a2
-; CHECK-NEXT:    vs8r.v v16, (a1)
-; CHECK-NEXT:    add a1, a0, t1
-; CHECK-NEXT:    vs8r.v v24, (a1)
-; CHECK-NEXT:    add a1, a0, a7
-; CHECK-NEXT:    vs8r.v v0, (a1)
-; CHECK-NEXT:    add a1, a0, a6
-; CHECK-NEXT:    addi a2, sp, 16
-; CHECK-NEXT:    vl8re8.v v8, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT:    vs8r.v v8, (a1)
-; CHECK-NEXT:    add a1, a0, a5
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a2, a2, 3
-; CHECK-NEXT:    add a2, sp, a2
-; CHECK-NEXT:    addi a2, a2, 16
-; CHECK-NEXT:    vl8re8.v v8, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT:    vs8r.v v8, (a1)
-; CHECK-NEXT:    add a1, a0, a4
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a2, a2, 4
-; CHECK-NEXT:    add a2, sp, a2
-; CHECK-NEXT:    addi a2, a2, 16
-; CHECK-NEXT:    vl8re8.v v8, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT:    vs8r.v v8, (a1)
+; CHECK-NEXT:    add a2, a0, a2
+; CHECK-NEXT:    vs8r.v v16, (a2)
+; CHECK-NEXT:    add t1, a0, t1
+; CHECK-NEXT:    vs8r.v v24, (t1)
+; CHECK-NEXT:    add a7, a0, a7
+; CHECK-NEXT:    vs8r.v v0, (a7)
+; CHECK-NEXT:    add a6, a0, a6
+; CHECK-NEXT:    addi a1, sp, 16
+; CHECK-NEXT:    vl8re8.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vs8r.v v8, (a6)
+; CHECK-NEXT:    add a5, a0, a5
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vl8re8.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vs8r.v v8, (a5)
+; CHECK-NEXT:    add a4, a0, a4
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 4
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vl8re8.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vs8r.v v8, (a4)
 ; CHECK-NEXT:    add a0, a0, a3
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    li a2, 24

diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
index e827619e11c0..f1fd3343cfdd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
@@ -151,18 +151,18 @@ define i1 @extractelt_nxv128i1(<vscale x 128 x i8>* %x, i64 %idx) nounwind {
 ; RV32-NEXT:    add a4, a0, a2
 ; RV32-NEXT:    vl8r.v v16, (a4)
 ; RV32-NEXT:    vl8r.v v24, (a0)
-; RV32-NEXT:    add a0, a3, a1
-; RV32-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
+; RV32-NEXT:    add a1, a3, a1
+; RV32-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; RV32-NEXT:    vmseq.vi v8, v16, 0
 ; RV32-NEXT:    vmseq.vi v0, v24, 0
 ; RV32-NEXT:    vmv.v.i v16, 0
 ; RV32-NEXT:    vmerge.vim v24, v16, 1, v0
 ; RV32-NEXT:    vs8r.v v24, (a3)
-; RV32-NEXT:    add a1, a3, a2
+; RV32-NEXT:    add a2, a3, a2
 ; RV32-NEXT:    vmv1r.v v0, v8
 ; RV32-NEXT:    vmerge.vim v8, v16, 1, v0
-; RV32-NEXT:    vs8r.v v8, (a1)
-; RV32-NEXT:    lb a0, 0(a0)
+; RV32-NEXT:    vs8r.v v8, (a2)
+; RV32-NEXT:    lb a0, 0(a1)
 ; RV32-NEXT:    addi sp, s0, -80
 ; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
@@ -191,18 +191,18 @@ define i1 @extractelt_nxv128i1(<vscale x 128 x i8>* %x, i64 %idx) nounwind {
 ; RV64-NEXT:    add a4, a0, a2
 ; RV64-NEXT:    vl8r.v v16, (a4)
 ; RV64-NEXT:    vl8r.v v24, (a0)
-; RV64-NEXT:    add a0, a3, a1
-; RV64-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
+; RV64-NEXT:    add a1, a3, a1
+; RV64-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; RV64-NEXT:    vmseq.vi v8, v16, 0
 ; RV64-NEXT:    vmseq.vi v0, v24, 0
 ; RV64-NEXT:    vmv.v.i v16, 0
 ; RV64-NEXT:    vmerge.vim v24, v16, 1, v0
 ; RV64-NEXT:    vs8r.v v24, (a3)
-; RV64-NEXT:    add a1, a3, a2
+; RV64-NEXT:    add a2, a3, a2
 ; RV64-NEXT:    vmv1r.v v0, v8
 ; RV64-NEXT:    vmerge.vim v8, v16, 1, v0
-; RV64-NEXT:    vs8r.v v8, (a1)
-; RV64-NEXT:    lb a0, 0(a0)
+; RV64-NEXT:    vs8r.v v8, (a2)
+; RV64-NEXT:    lb a0, 0(a1)
 ; RV64-NEXT:    addi sp, s0, -80
 ; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll
index 4e2971453eb5..db93fa8e415e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll
@@ -35,15 +35,15 @@ define void @add_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; RV32-NEXT:    lw t0, 8(a1)
 ; RV32-NEXT:    lw a1, 12(a1)
 ; RV32-NEXT:    add a5, a5, a6
-; RV32-NEXT:    add a6, a4, a7
-; RV32-NEXT:    sltu a4, a6, a4
+; RV32-NEXT:    add a7, a4, a7
+; RV32-NEXT:    sltu a4, a7, a4
 ; RV32-NEXT:    add a4, a5, a4
 ; RV32-NEXT:    add a1, a3, a1
-; RV32-NEXT:    add a3, a2, t0
-; RV32-NEXT:    sltu a2, a3, a2
+; RV32-NEXT:    add t0, a2, t0
+; RV32-NEXT:    sltu a2, t0, a2
 ; RV32-NEXT:    add a1, a1, a2
-; RV32-NEXT:    sw a3, 8(a0)
-; RV32-NEXT:    sw a6, 0(a0)
+; RV32-NEXT:    sw t0, 8(a0)
+; RV32-NEXT:    sw a7, 0(a0)
 ; RV32-NEXT:    sw a1, 12(a0)
 ; RV32-NEXT:    sw a4, 4(a0)
 ; RV32-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index 57df6eb50f39..e75067c26282 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -6006,8 +6006,8 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, i64* %base, <8 x i64> %idxs,
 ; RV64ZVE32F-NEXT:    beqz a0, .LBB51_9
 ; RV64ZVE32F-NEXT:  .LBB51_8: # %cond.store13
 ; RV64ZVE32F-NEXT:    slli a5, a5, 3
-; RV64ZVE32F-NEXT:    add a0, a1, a5
-; RV64ZVE32F-NEXT:    sd a3, 0(a0)
+; RV64ZVE32F-NEXT:    add a1, a1, a5
+; RV64ZVE32F-NEXT:    sd a3, 0(a1)
 ; RV64ZVE32F-NEXT:  .LBB51_9: # %else14
 ; RV64ZVE32F-NEXT:    ld s0, 24(sp) # 8-byte Folded Reload
 ; RV64ZVE32F-NEXT:    ld s1, 16(sp) # 8-byte Folded Reload
@@ -6025,38 +6025,38 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, i64* %base, <8 x i64> %idxs,
 ; RV64ZVE32F-NEXT:    beqz a0, .LBB51_2
 ; RV64ZVE32F-NEXT:  .LBB51_11: # %cond.store1
 ; RV64ZVE32F-NEXT:    slli s2, s2, 3
-; RV64ZVE32F-NEXT:    add a0, a1, s2
-; RV64ZVE32F-NEXT:    sd s1, 0(a0)
+; RV64ZVE32F-NEXT:    add s2, a1, s2
+; RV64ZVE32F-NEXT:    sd s1, 0(s2)
 ; RV64ZVE32F-NEXT:    andi a0, a7, 4
 ; RV64ZVE32F-NEXT:    beqz a0, .LBB51_3
 ; RV64ZVE32F-NEXT:  .LBB51_12: # %cond.store3
 ; RV64ZVE32F-NEXT:    slli s0, s0, 3
-; RV64ZVE32F-NEXT:    add a0, a1, s0
-; RV64ZVE32F-NEXT:    sd t6, 0(a0)
+; RV64ZVE32F-NEXT:    add s0, a1, s0
+; RV64ZVE32F-NEXT:    sd t6, 0(s0)
 ; RV64ZVE32F-NEXT:    andi a0, a7, 8
 ; RV64ZVE32F-NEXT:    beqz a0, .LBB51_4
 ; RV64ZVE32F-NEXT:  .LBB51_13: # %cond.store5
 ; RV64ZVE32F-NEXT:    slli t5, t5, 3
-; RV64ZVE32F-NEXT:    add a0, a1, t5
-; RV64ZVE32F-NEXT:    sd t3, 0(a0)
+; RV64ZVE32F-NEXT:    add t5, a1, t5
+; RV64ZVE32F-NEXT:    sd t3, 0(t5)
 ; RV64ZVE32F-NEXT:    andi a0, a7, 16
 ; RV64ZVE32F-NEXT:    beqz a0, .LBB51_5
 ; RV64ZVE32F-NEXT:  .LBB51_14: # %cond.store7
 ; RV64ZVE32F-NEXT:    slli t4, t4, 3
-; RV64ZVE32F-NEXT:    add a0, a1, t4
-; RV64ZVE32F-NEXT:    sd t1, 0(a0)
+; RV64ZVE32F-NEXT:    add t4, a1, t4
+; RV64ZVE32F-NEXT:    sd t1, 0(t4)
 ; RV64ZVE32F-NEXT:    andi a0, a7, 32
 ; RV64ZVE32F-NEXT:    beqz a0, .LBB51_6
 ; RV64ZVE32F-NEXT:  .LBB51_15: # %cond.store9
 ; RV64ZVE32F-NEXT:    slli t2, t2, 3
-; RV64ZVE32F-NEXT:    add a0, a1, t2
-; RV64ZVE32F-NEXT:    sd a6, 0(a0)
+; RV64ZVE32F-NEXT:    add t2, a1, t2
+; RV64ZVE32F-NEXT:    sd a6, 0(t2)
 ; RV64ZVE32F-NEXT:    andi a0, a7, 64
 ; RV64ZVE32F-NEXT:    beqz a0, .LBB51_7
 ; RV64ZVE32F-NEXT:  .LBB51_16: # %cond.store11
 ; RV64ZVE32F-NEXT:    slli t0, t0, 3
-; RV64ZVE32F-NEXT:    add a0, a1, t0
-; RV64ZVE32F-NEXT:    sd a4, 0(a0)
+; RV64ZVE32F-NEXT:    add t0, a1, t0
+; RV64ZVE32F-NEXT:    sd a4, 0(t0)
 ; RV64ZVE32F-NEXT:    andi a0, a7, -128
 ; RV64ZVE32F-NEXT:    bnez a0, .LBB51_8
 ; RV64ZVE32F-NEXT:    j .LBB51_9
@@ -10702,38 +10702,38 @@ define void @mscatter_baseidx_v8f64(<8 x double> %val, double* %base, <8 x i64>
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB90_2
 ; RV64ZVE32F-NEXT:  .LBB90_10: # %cond.store1
 ; RV64ZVE32F-NEXT:    slli t1, t1, 3
-; RV64ZVE32F-NEXT:    add a1, a0, t1
-; RV64ZVE32F-NEXT:    fsd fa1, 0(a1)
+; RV64ZVE32F-NEXT:    add t1, a0, t1
+; RV64ZVE32F-NEXT:    fsd fa1, 0(t1)
 ; RV64ZVE32F-NEXT:    andi a1, a3, 4
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB90_3
 ; RV64ZVE32F-NEXT:  .LBB90_11: # %cond.store3
 ; RV64ZVE32F-NEXT:    slli t0, t0, 3
-; RV64ZVE32F-NEXT:    add a1, a0, t0
-; RV64ZVE32F-NEXT:    fsd fa2, 0(a1)
+; RV64ZVE32F-NEXT:    add t0, a0, t0
+; RV64ZVE32F-NEXT:    fsd fa2, 0(t0)
 ; RV64ZVE32F-NEXT:    andi a1, a3, 8
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB90_4
 ; RV64ZVE32F-NEXT:  .LBB90_12: # %cond.store5
 ; RV64ZVE32F-NEXT:    slli a7, a7, 3
-; RV64ZVE32F-NEXT:    add a1, a0, a7
-; RV64ZVE32F-NEXT:    fsd fa3, 0(a1)
+; RV64ZVE32F-NEXT:    add a7, a0, a7
+; RV64ZVE32F-NEXT:    fsd fa3, 0(a7)
 ; RV64ZVE32F-NEXT:    andi a1, a3, 16
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB90_5
 ; RV64ZVE32F-NEXT:  .LBB90_13: # %cond.store7
 ; RV64ZVE32F-NEXT:    slli a6, a6, 3
-; RV64ZVE32F-NEXT:    add a1, a0, a6
-; RV64ZVE32F-NEXT:    fsd fa4, 0(a1)
+; RV64ZVE32F-NEXT:    add a6, a0, a6
+; RV64ZVE32F-NEXT:    fsd fa4, 0(a6)
 ; RV64ZVE32F-NEXT:    andi a1, a3, 32
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB90_6
 ; RV64ZVE32F-NEXT:  .LBB90_14: # %cond.store9
 ; RV64ZVE32F-NEXT:    slli a5, a5, 3
-; RV64ZVE32F-NEXT:    add a1, a0, a5
-; RV64ZVE32F-NEXT:    fsd fa5, 0(a1)
+; RV64ZVE32F-NEXT:    add a5, a0, a5
+; RV64ZVE32F-NEXT:    fsd fa5, 0(a5)
 ; RV64ZVE32F-NEXT:    andi a1, a3, 64
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB90_7
 ; RV64ZVE32F-NEXT:  .LBB90_15: # %cond.store11
 ; RV64ZVE32F-NEXT:    slli a4, a4, 3
-; RV64ZVE32F-NEXT:    add a1, a0, a4
-; RV64ZVE32F-NEXT:    fsd fa6, 0(a1)
+; RV64ZVE32F-NEXT:    add a4, a0, a4
+; RV64ZVE32F-NEXT:    fsd fa6, 0(a4)
 ; RV64ZVE32F-NEXT:    andi a1, a3, -128
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB90_8
 ; RV64ZVE32F-NEXT:  .LBB90_16: # %cond.store13

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index bf4fb19689f3..6dc46ae1d938 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -592,21 +592,21 @@ define void @vlmax(i64 %N, double* %c, double* %a, double* %b) {
 ; CHECK-NEXT:    vsetvli a6, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    blez a0, .LBB11_3
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
-; CHECK-NEXT:    li a5, 0
+; CHECK-NEXT:    li a4, 0
 ; CHECK-NEXT:    li t1, 0
 ; CHECK-NEXT:    slli a7, a6, 3
 ; CHECK-NEXT:  .LBB11_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    add t0, a2, a5
+; CHECK-NEXT:    add t0, a2, a4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (t0)
-; CHECK-NEXT:    add a4, a3, a5
-; CHECK-NEXT:    vle64.v v9, (a4)
+; CHECK-NEXT:    add a5, a3, a4
+; CHECK-NEXT:    vle64.v v9, (a5)
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    add a4, a1, a5
-; CHECK-NEXT:    vse64.v v8, (a4)
+; CHECK-NEXT:    add a5, a1, a4
+; CHECK-NEXT:    vse64.v v8, (a5)
 ; CHECK-NEXT:    add t1, t1, a6
-; CHECK-NEXT:    add a5, a5, a7
+; CHECK-NEXT:    add a4, a4, a7
 ; CHECK-NEXT:    blt t1, a0, .LBB11_2
 ; CHECK-NEXT:  .LBB11_3: # %for.end
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/select-binop-identity.ll b/llvm/test/CodeGen/RISCV/select-binop-identity.ll
index b22988a7ec86..33f22e8abb5a 100644
--- a/llvm/test/CodeGen/RISCV/select-binop-identity.ll
+++ b/llvm/test/CodeGen/RISCV/select-binop-identity.ll
@@ -174,10 +174,10 @@ define i64 @add_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    beqz a0, .LBB7_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    add a0, a4, a2
+; RV32I-NEXT:    add a2, a4, a2
 ; RV32I-NEXT:    add a1, a3, a1
-; RV32I-NEXT:    sltu a2, a1, a3
-; RV32I-NEXT:    add a4, a0, a2
+; RV32I-NEXT:    sltu a4, a1, a3
+; RV32I-NEXT:    add a4, a2, a4
 ; RV32I-NEXT:    mv a3, a1
 ; RV32I-NEXT:  .LBB7_2:
 ; RV32I-NEXT:    mv a0, a3

diff --git a/llvm/test/CodeGen/RISCV/shadowcallstack.ll b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
index 6b25b0d910c7..b72e994f03c0 100644
--- a/llvm/test/CodeGen/RISCV/shadowcallstack.ll
+++ b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
@@ -88,9 +88,9 @@ define i32 @f4() shadowcallstack {
 ; RV32-NEXT:    call bar@plt
 ; RV32-NEXT:    mv s3, a0
 ; RV32-NEXT:    call bar@plt
-; RV32-NEXT:    add a1, s0, s1
+; RV32-NEXT:    add s0, s0, s1
 ; RV32-NEXT:    add a0, s3, a0
-; RV32-NEXT:    add a0, a1, a0
+; RV32-NEXT:    add a0, s0, a0
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/split-offsets.ll b/llvm/test/CodeGen/RISCV/split-offsets.ll
index 1d5e9aa1f432..411ac29b62e0 100644
--- a/llvm/test/CodeGen/RISCV/split-offsets.ll
+++ b/llvm/test/CodeGen/RISCV/split-offsets.ll
@@ -56,19 +56,19 @@ define void @test2([65536 x i32]** %sp, [65536 x i32]* %t, i32 %n) {
 ; RV32I-LABEL: test2:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    lw a4, 0(a0)
-; RV32I-NEXT:    lui a0, 20
-; RV32I-NEXT:    addi a5, a0, -1920
-; RV32I-NEXT:    add a0, a1, a5
-; RV32I-NEXT:    add a1, a4, a5
+; RV32I-NEXT:    lw a0, 0(a0)
+; RV32I-NEXT:    lui a4, 20
+; RV32I-NEXT:    addi a4, a4, -1920
+; RV32I-NEXT:    add a1, a1, a4
+; RV32I-NEXT:    add a0, a0, a4
 ; RV32I-NEXT:    bge a3, a2, .LBB1_2
 ; RV32I-NEXT:  .LBB1_1: # %while_body
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    addi a4, a3, 1
-; RV32I-NEXT:    sw a4, 0(a1)
-; RV32I-NEXT:    sw a3, 4(a1)
 ; RV32I-NEXT:    sw a4, 0(a0)
 ; RV32I-NEXT:    sw a3, 4(a0)
+; RV32I-NEXT:    sw a4, 0(a1)
+; RV32I-NEXT:    sw a3, 4(a1)
 ; RV32I-NEXT:    mv a3, a4
 ; RV32I-NEXT:    blt a3, a2, .LBB1_1
 ; RV32I-NEXT:  .LBB1_2: # %while_end
@@ -77,20 +77,20 @@ define void @test2([65536 x i32]** %sp, [65536 x i32]* %t, i32 %n) {
 ; RV64I-LABEL: test2:
 ; RV64I:       # %bb.0: # %entry
 ; RV64I-NEXT:    li a3, 0
-; RV64I-NEXT:    ld a4, 0(a0)
-; RV64I-NEXT:    lui a0, 20
-; RV64I-NEXT:    addiw a5, a0, -1920
-; RV64I-NEXT:    add a0, a1, a5
-; RV64I-NEXT:    add a1, a4, a5
+; RV64I-NEXT:    ld a0, 0(a0)
+; RV64I-NEXT:    lui a4, 20
+; RV64I-NEXT:    addiw a4, a4, -1920
+; RV64I-NEXT:    add a1, a1, a4
+; RV64I-NEXT:    add a0, a0, a4
 ; RV64I-NEXT:    sext.w a2, a2
 ; RV64I-NEXT:    bge a3, a2, .LBB1_2
 ; RV64I-NEXT:  .LBB1_1: # %while_body
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    addiw a4, a3, 1
-; RV64I-NEXT:    sw a4, 0(a1)
-; RV64I-NEXT:    sw a3, 4(a1)
 ; RV64I-NEXT:    sw a4, 0(a0)
 ; RV64I-NEXT:    sw a3, 4(a0)
+; RV64I-NEXT:    sw a4, 0(a1)
+; RV64I-NEXT:    sw a3, 4(a1)
 ; RV64I-NEXT:    mv a3, a4
 ; RV64I-NEXT:    blt a3, a2, .LBB1_1
 ; RV64I-NEXT:  .LBB1_2: # %while_end

diff --git a/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll b/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
index cd2b7aa9f351..29d1374b3ea1 100644
--- a/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
+++ b/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
@@ -24,8 +24,8 @@ define iXLen2 @test_udiv_3(iXLen2 %x) nounwind {
 ; RV32-NEXT:    add a3, a6, a3
 ; RV32-NEXT:    sltu a0, a0, a2
 ; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    mul a0, a0, a4
-; RV32-NEXT:    add a1, a3, a0
+; RV32-NEXT:    mul a1, a0, a4
+; RV32-NEXT:    add a1, a3, a1
 ; RV32-NEXT:    mul a0, a5, a4
 ; RV32-NEXT:    ret
 ;
@@ -49,8 +49,8 @@ define iXLen2 @test_udiv_3(iXLen2 %x) nounwind {
 ; RV64-NEXT:    add a5, a6, a5
 ; RV64-NEXT:    sltu a0, a0, a3
 ; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    mul a0, a0, a2
-; RV64-NEXT:    add a1, a5, a0
+; RV64-NEXT:    mul a1, a0, a2
+; RV64-NEXT:    add a1, a5, a1
 ; RV64-NEXT:    mul a0, a4, a2
 ; RV64-NEXT:    ret
   %a = udiv iXLen2 %x, 3
@@ -77,8 +77,8 @@ define iXLen2 @test_udiv_5(iXLen2 %x) nounwind {
 ; RV32-NEXT:    add a3, a6, a3
 ; RV32-NEXT:    sltu a0, a0, a2
 ; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    mul a0, a0, a4
-; RV32-NEXT:    add a1, a3, a0
+; RV32-NEXT:    mul a1, a0, a4
+; RV32-NEXT:    add a1, a3, a1
 ; RV32-NEXT:    mul a0, a5, a4
 ; RV32-NEXT:    ret
 ;
@@ -102,8 +102,8 @@ define iXLen2 @test_udiv_5(iXLen2 %x) nounwind {
 ; RV64-NEXT:    add a5, a6, a5
 ; RV64-NEXT:    sltu a0, a0, a3
 ; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    mul a0, a0, a2
-; RV64-NEXT:    add a1, a5, a0
+; RV64-NEXT:    mul a1, a0, a2
+; RV64-NEXT:    add a1, a5, a1
 ; RV64-NEXT:    mul a0, a4, a2
 ; RV64-NEXT:    ret
   %a = udiv iXLen2 %x, 5
@@ -184,8 +184,8 @@ define iXLen2 @test_udiv_15(iXLen2 %x) nounwind {
 ; RV32-NEXT:    add a5, a6, a5
 ; RV32-NEXT:    sltu a0, a0, a2
 ; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    mul a0, a0, a4
-; RV32-NEXT:    add a1, a5, a0
+; RV32-NEXT:    mul a1, a0, a4
+; RV32-NEXT:    add a1, a5, a1
 ; RV32-NEXT:    mul a0, a3, a4
 ; RV32-NEXT:    ret
 ;
@@ -211,8 +211,8 @@ define iXLen2 @test_udiv_15(iXLen2 %x) nounwind {
 ; RV64-NEXT:    add a4, a6, a4
 ; RV64-NEXT:    sltu a0, a0, a2
 ; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    mul a0, a0, a5
-; RV64-NEXT:    add a1, a4, a0
+; RV64-NEXT:    mul a1, a0, a5
+; RV64-NEXT:    add a1, a4, a1
 ; RV64-NEXT:    mul a0, a3, a5
 ; RV64-NEXT:    ret
   %a = udiv iXLen2 %x, 15
@@ -239,8 +239,8 @@ define iXLen2 @test_udiv_17(iXLen2 %x) nounwind {
 ; RV32-NEXT:    add a3, a6, a3
 ; RV32-NEXT:    sltu a0, a0, a2
 ; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    mul a0, a0, a4
-; RV32-NEXT:    add a1, a3, a0
+; RV32-NEXT:    mul a1, a0, a4
+; RV32-NEXT:    add a1, a3, a1
 ; RV32-NEXT:    mul a0, a5, a4
 ; RV32-NEXT:    ret
 ;
@@ -264,8 +264,8 @@ define iXLen2 @test_udiv_17(iXLen2 %x) nounwind {
 ; RV64-NEXT:    add a5, a6, a5
 ; RV64-NEXT:    sltu a0, a0, a3
 ; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    mul a0, a0, a2
-; RV64-NEXT:    add a1, a5, a0
+; RV64-NEXT:    mul a1, a0, a2
+; RV64-NEXT:    add a1, a5, a1
 ; RV64-NEXT:    mul a0, a4, a2
 ; RV64-NEXT:    ret
   %a = udiv iXLen2 %x, 17
@@ -294,8 +294,8 @@ define iXLen2 @test_udiv_255(iXLen2 %x) nounwind {
 ; RV32-NEXT:    add a5, a6, a5
 ; RV32-NEXT:    sltu a0, a0, a2
 ; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    mul a0, a0, a4
-; RV32-NEXT:    add a1, a5, a0
+; RV32-NEXT:    mul a1, a0, a4
+; RV32-NEXT:    add a1, a5, a1
 ; RV32-NEXT:    mul a0, a3, a4
 ; RV32-NEXT:    ret
 ;
@@ -321,8 +321,8 @@ define iXLen2 @test_udiv_255(iXLen2 %x) nounwind {
 ; RV64-NEXT:    add a4, a6, a4
 ; RV64-NEXT:    sltu a0, a0, a2
 ; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    mul a0, a0, a5
-; RV64-NEXT:    add a1, a4, a0
+; RV64-NEXT:    mul a1, a0, a5
+; RV64-NEXT:    add a1, a4, a1
 ; RV64-NEXT:    mul a0, a3, a5
 ; RV64-NEXT:    ret
   %a = udiv iXLen2 %x, 255
@@ -349,8 +349,8 @@ define iXLen2 @test_udiv_257(iXLen2 %x) nounwind {
 ; RV32-NEXT:    add a3, a6, a3
 ; RV32-NEXT:    sltu a0, a0, a2
 ; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    mul a0, a0, a4
-; RV32-NEXT:    add a1, a3, a0
+; RV32-NEXT:    mul a1, a0, a4
+; RV32-NEXT:    add a1, a3, a1
 ; RV32-NEXT:    mul a0, a5, a4
 ; RV32-NEXT:    ret
 ;
@@ -374,8 +374,8 @@ define iXLen2 @test_udiv_257(iXLen2 %x) nounwind {
 ; RV64-NEXT:    add a5, a6, a5
 ; RV64-NEXT:    sltu a0, a0, a3
 ; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    mul a0, a0, a2
-; RV64-NEXT:    add a1, a5, a0
+; RV64-NEXT:    mul a1, a0, a2
+; RV64-NEXT:    add a1, a5, a1
 ; RV64-NEXT:    mul a0, a4, a2
 ; RV64-NEXT:    ret
   %a = udiv iXLen2 %x, 257
@@ -436,8 +436,8 @@ define iXLen2 @test_udiv_65535(iXLen2 %x) nounwind {
 ; RV64-NEXT:    add a5, a6, a5
 ; RV64-NEXT:    sltu a0, a0, a2
 ; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    mul a0, a0, a4
-; RV64-NEXT:    add a1, a5, a0
+; RV64-NEXT:    mul a1, a0, a4
+; RV64-NEXT:    add a1, a5, a1
 ; RV64-NEXT:    mul a0, a3, a4
 ; RV64-NEXT:    ret
   %a = udiv iXLen2 %x, 65535
@@ -464,8 +464,8 @@ define iXLen2 @test_udiv_65537(iXLen2 %x) nounwind {
 ; RV32-NEXT:    sltu a0, a0, a2
 ; RV32-NEXT:    sub a0, a1, a0
 ; RV32-NEXT:    slli a1, a0, 16
-; RV32-NEXT:    sub a0, a0, a1
-; RV32-NEXT:    add a1, a4, a0
+; RV32-NEXT:    sub a1, a0, a1
+; RV32-NEXT:    add a1, a4, a1
 ; RV32-NEXT:    sub a0, a3, a5
 ; RV32-NEXT:    ret
 ;
@@ -491,8 +491,8 @@ define iXLen2 @test_udiv_65537(iXLen2 %x) nounwind {
 ; RV64-NEXT:    add a3, a6, a3
 ; RV64-NEXT:    sltu a0, a0, a2
 ; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    mul a0, a0, a4
-; RV64-NEXT:    add a1, a3, a0
+; RV64-NEXT:    mul a1, a0, a4
+; RV64-NEXT:    add a1, a3, a1
 ; RV64-NEXT:    mul a0, a5, a4
 ; RV64-NEXT:    ret
   %a = udiv iXLen2 %x, 65537
@@ -523,8 +523,8 @@ define iXLen2 @test_udiv_12(iXLen2 %x) nounwind {
 ; RV32-NEXT:    add a3, a6, a3
 ; RV32-NEXT:    sltu a0, a0, a2
 ; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    mul a0, a0, a4
-; RV32-NEXT:    add a1, a3, a0
+; RV32-NEXT:    mul a1, a0, a4
+; RV32-NEXT:    add a1, a3, a1
 ; RV32-NEXT:    mul a0, a5, a4
 ; RV32-NEXT:    ret
 ;
@@ -552,8 +552,8 @@ define iXLen2 @test_udiv_12(iXLen2 %x) nounwind {
 ; RV64-NEXT:    add a5, a6, a5
 ; RV64-NEXT:    sltu a0, a0, a3
 ; RV64-NEXT:    sub a0, a1, a0
-; RV64-NEXT:    mul a0, a0, a2
-; RV64-NEXT:    add a1, a5, a0
+; RV64-NEXT:    mul a1, a0, a2
+; RV64-NEXT:    add a1, a5, a1
 ; RV64-NEXT:    mul a0, a4, a2
 ; RV64-NEXT:    ret
   %a = udiv iXLen2 %x, 12

diff --git a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
index a45cb88adb5f..367095efdf39 100644
--- a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
@@ -423,12 +423,12 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __divsi3@plt
 ; RV32I-NEXT:    add a0, s8, a0
-; RV32I-NEXT:    add a1, s7, s2
-; RV32I-NEXT:    add a2, s6, s3
-; RV32I-NEXT:    add a3, s5, s4
-; RV32I-NEXT:    sh a3, 6(s0)
-; RV32I-NEXT:    sh a2, 4(s0)
-; RV32I-NEXT:    sh a1, 2(s0)
+; RV32I-NEXT:    add s2, s7, s2
+; RV32I-NEXT:    add s3, s6, s3
+; RV32I-NEXT:    add s4, s5, s4
+; RV32I-NEXT:    sh s4, 6(s0)
+; RV32I-NEXT:    sh s3, 4(s0)
+; RV32I-NEXT:    sh s2, 2(s0)
 ; RV32I-NEXT:    sh a0, 0(s0)
 ; RV32I-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
index f42672efa9ba..7a8c4c04fd0e 100644
--- a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
@@ -54,40 +54,40 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; RV32I-LABEL: func64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    add a3, a1, a5
-; RV32I-NEXT:    add a2, a0, a4
-; RV32I-NEXT:    sltu a0, a2, a0
-; RV32I-NEXT:    add a3, a3, a0
-; RV32I-NEXT:    beq a3, a1, .LBB1_2
+; RV32I-NEXT:    add a2, a1, a5
+; RV32I-NEXT:    add a4, a0, a4
+; RV32I-NEXT:    sltu a0, a4, a0
+; RV32I-NEXT:    add a2, a2, a0
+; RV32I-NEXT:    beq a2, a1, .LBB1_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    sltu a0, a3, a1
+; RV32I-NEXT:    sltu a0, a2, a1
 ; RV32I-NEXT:  .LBB1_2:
 ; RV32I-NEXT:    neg a1, a0
-; RV32I-NEXT:    or a0, a1, a2
-; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    or a0, a1, a4
+; RV32I-NEXT:    or a1, a1, a2
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    add a1, a0, a2
-; RV64I-NEXT:    sltu a0, a1, a0
+; RV64I-NEXT:    add a2, a0, a2
+; RV64I-NEXT:    sltu a0, a2, a0
 ; RV64I-NEXT:    neg a0, a0
-; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    or a0, a0, a2
 ; RV64I-NEXT:    ret
 ;
 ; RV32IZbb-LABEL: func64:
 ; RV32IZbb:       # %bb.0:
-; RV32IZbb-NEXT:    add a3, a1, a5
-; RV32IZbb-NEXT:    add a2, a0, a4
-; RV32IZbb-NEXT:    sltu a0, a2, a0
-; RV32IZbb-NEXT:    add a3, a3, a0
-; RV32IZbb-NEXT:    beq a3, a1, .LBB1_2
+; RV32IZbb-NEXT:    add a2, a1, a5
+; RV32IZbb-NEXT:    add a4, a0, a4
+; RV32IZbb-NEXT:    sltu a0, a4, a0
+; RV32IZbb-NEXT:    add a2, a2, a0
+; RV32IZbb-NEXT:    beq a2, a1, .LBB1_2
 ; RV32IZbb-NEXT:  # %bb.1:
-; RV32IZbb-NEXT:    sltu a0, a3, a1
+; RV32IZbb-NEXT:    sltu a0, a2, a1
 ; RV32IZbb-NEXT:  .LBB1_2:
 ; RV32IZbb-NEXT:    neg a1, a0
-; RV32IZbb-NEXT:    or a0, a1, a2
-; RV32IZbb-NEXT:    or a1, a1, a3
+; RV32IZbb-NEXT:    or a0, a1, a4
+; RV32IZbb-NEXT:    or a1, a1, a2
 ; RV32IZbb-NEXT:    ret
 ;
 ; RV64IZbb-LABEL: func64:

diff --git a/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll b/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll
index 8e2c3f350df8..bf521e3663d0 100644
--- a/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll
+++ b/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll
@@ -37,11 +37,11 @@ define { i128, i8 } @muloti_test(i128 %l, i128 %r) #0 {
 ; RISCV32-NEXT:    add s4, s3, t1
 ; RISCV32-NEXT:    add t1, s0, s4
 ; RISCV32-NEXT:    sltu t2, t1, s0
-; RISCV32-NEXT:    sltu t6, s0, t6
+; RISCV32-NEXT:    sltu s0, s0, t6
 ; RISCV32-NEXT:    sltu t4, t5, t4
 ; RISCV32-NEXT:    mulhu t5, t3, a2
 ; RISCV32-NEXT:    add t4, t5, t4
-; RISCV32-NEXT:    add s0, t4, t6
+; RISCV32-NEXT:    add s0, t4, s0
 ; RISCV32-NEXT:    mul t4, t3, t0
 ; RISCV32-NEXT:    mul t5, a7, a5
 ; RISCV32-NEXT:    add t4, t5, t4

diff --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
index b804e53b6a71..cd0c4efc41fc 100644
--- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
@@ -413,12 +413,12 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __udivsi3@plt
 ; RV32I-NEXT:    add a0, s8, a0
-; RV32I-NEXT:    add a1, s7, s2
-; RV32I-NEXT:    add a2, s6, s3
-; RV32I-NEXT:    add a3, s5, s4
-; RV32I-NEXT:    sh a3, 6(s0)
-; RV32I-NEXT:    sh a2, 4(s0)
-; RV32I-NEXT:    sh a1, 2(s0)
+; RV32I-NEXT:    add s2, s7, s2
+; RV32I-NEXT:    add s3, s6, s3
+; RV32I-NEXT:    add s4, s5, s4
+; RV32I-NEXT:    sh s4, 6(s0)
+; RV32I-NEXT:    sh s3, 4(s0)
+; RV32I-NEXT:    sh s2, 2(s0)
 ; RV32I-NEXT:    sh a0, 0(s0)
 ; RV32I-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload



